xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 2885)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
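
/*
 * Illustrative sketch only (the names here are hypothetical, not part
 * of this file): the variable-block-size eviction described in point 3
 * above amounts to freeing evictable blocks, lowest in the list first,
 * until the requested space is covered:
 *
 *	uint64_t freed = 0;
 *	while (freed < needed) {
 *		arc_buf_hdr_t *ab = pick_lowest_evictable_block();
 *		if (ab == NULL)
 *			break;			-- nothing evictable; throttle
 *		freed += ab->b_size;		-- anywhere from 512B to 128K
 *		evict_block(ab);
 *	}
 *
 * The real logic lives in arc_evict() below, which also handles the
 * locking and reference-count rules described next.
 */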

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
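
/*
 * A minimal sketch of the lock-ordering rule above (illustration only,
 * not code from this file): a walker that already holds an arc list
 * mutex must take hash locks with mutex_tryenter() and skip buffers it
 * cannot lock, rather than blocking against an arc_read() caller that
 * takes the same two locks in the opposite order:
 *
 *	mutex_enter(&state->mtx);
 *	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
 *		ab_prev = list_prev(&state->list, ab);
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;		-- skip; never block here
 *		... operate on ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->mtx);
 *
 * arc_evict() below follows exactly this pattern, counting the skipped
 * buffers in arc.mutex_miss.
 */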

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static kmutex_t arc_reclaim_lock;
static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to a buffer, it is
 * linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */

typedef struct arc_state {
	list_t	list;	/* linked list of evictable buffers in state */
	uint64_t lsize;	/* total size of buffers in the linked list */
	uint64_t size;	/* total size of all buffers in this state */
	uint64_t hits;
	kmutex_t mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

static struct arc {
	arc_state_t 	*anon;
	arc_state_t	*mru;
	arc_state_t	*mru_ghost;
	arc_state_t	*mfu;
	arc_state_t	*mfu_ghost;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	recycle_miss;
	uint64_t	mutex_miss;
	uint64_t	evict_skip;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)

/*
 * Private ARC flags.  These are ARC-internal flags that show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
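
/*
 * Typical caller pattern for buf_hash_find() (a sketch; BP_IDENTITY()
 * is the usual way to get the DVA out of a block pointer): on a hit
 * the hash lock is returned held, so the caller is responsible for
 * dropping it once it is done with the header.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth,
 *	    &hash_lock);
 *	if (hdr != NULL) {
 *		... hash-lock-protected fields of hdr are stable here ...
 *		mutex_exit(hash_lock);
 *	}
 */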

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t max, i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		atomic_add_64(&arc.hash_collisions, 1);
		if (i == 1)
			atomic_add_64(&arc.hash_chains, 1);
	}
	while (i > (max = arc.hash_chain_max) &&
	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
		continue;
	}
	atomic_add_64(&arc.hash_elements, 1);
	if (arc.hash_elements > arc.hash_elements_max)
		atomic_add_64(&arc.hash_elements_max, 1);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	atomic_add_64(&arc.hash_elements, -1);
	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		atomic_add_64(&arc.hash_chains, -1);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

static int arc_reclaim_needed(void);
void arc_kmem_reclaim(void);

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	if (arc_reclaim_needed())
		arc_kmem_reclaim();
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
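
	/*
	 * Worked example of the loop above, assuming 8-byte pointers:
	 * with 4GB of physical memory, physmem * PAGESIZE = 2^32, so
	 * hsize doubles from 2^12 until hsize * 65536 >= 2^32, giving
	 * 2^16 buckets.  The table is then 2^16 * 8 bytes = 512KB,
	 * matching the 128KB-per-GB estimate in the comment above.
	 */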
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc.anon)) {
		int delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->lsize, >=, delta);
		atomic_add_64(&ab->b_state->lsize, -delta);
		mutex_exit(&ab->b_state->mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;

	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(ab->b_state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&ab->b_state->list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
		mutex_exit(&ab->b_state->mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&old_state->mtx);

			if (use_mutex)
				mutex_enter(&old_state->mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->lsize, >=, from_delta);
			atomic_add_64(&old_state->lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->mtx);
		}
		if (new_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&new_state->mtx);

			if (use_mutex)
				mutex_enter(&new_state->mtx);

			list_insert_head(&new_state->list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->lsize, to_delta);
			ASSERT3U(new_state->size + to_delta, >=,
			    new_state->lsize);

			if (use_mutex)
				mutex_exit(&new_state->mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc.anon && old_state != arc.anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->size, >=, from_delta);
		atomic_add_64(&old_state->size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_spa = spa;
	hdr->b_state = arc.anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
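
/*
 * Usage sketch for the allocation interface (illustrative): holds on a
 * buffer are paired with an opaque tag, and the same tag must be used
 * to release the hold.  FTAG is the usual convention for a hold scoped
 * to the current function:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, FTAG);
 *	... fill buf->b_data ...
 *	arc_buf_free(buf, FTAG);
 *
 * The tag itself is only used by the refcount machinery to pair
 * refcount_add() calls with refcount_remove() calls.
 */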

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().  We can do this without holding any
	 * locks because if we happen to obtain the header before it's
	 * cleared, we will find b_data is NULL later.
	 */
	if (hdr == NULL)
		return;

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	atomic_add_64(&arc.hits, 1);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;

		if (!recycle) {
			zio_buf_free(buf->b_data, size);
			atomic_add_64(&arc.size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc.anon);
			ASSERT3U(state->lsize, >=, size);
			atomic_add_64(&state->lsize, -size);
		}
		ASSERT3U(state->size, >=, size);
		atomic_add_64(&state->size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc.anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc.anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc.anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *steal = NULL;

	ASSERT(state == arc.mru || state == arc.mfu);

	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

	mutex_enter(&state->mtx);
	mutex_enter(&evicted_state->mtx);

	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		if (recycle && (ab->b_size != bytes || ab->b_datacnt > 1))
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle)
						steal = buf->b_data;
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf, recycle, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf, recycle, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}
	mutex_exit(&evicted_state->mtx);
	mutex_exit(&state->mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		atomic_add_64(&arc.evict_skip, skipped);
	if (missed)
		atomic_add_64(&arc.mutex_miss, missed);
	return (steal);
}
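
/*
 * Sketch of the recycle path (hypothetical caller, for illustration):
 * a caller that needs a data block of `size' bytes while the cache is
 * full can ask arc_evict() to hand back the victim's data block
 * instead of freeing it, avoiding a free/alloc round trip through the
 * zio_buf kmem caches:
 *
 *	void *data = arc_evict(state, size, TRUE);
 *	if (data == NULL)
 *		data = zio_buf_alloc(size);	-- nothing recyclable
 *
 * arc_get_data_buf(), declared earlier and defined later in the file,
 * is the intended consumer of this mode; note that the recycle path
 * only matches single-data headers of exactly `size' bytes.
 */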

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->mtx);
	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc.anon, ab, hash_lock);
			mutex_exit(hash_lock);
			atomic_add_64(&arc.deleted, 1);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->mtx);

	if (bufs_skipped) {
		atomic_add_64(&arc.mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over;

	top_sz = arc.anon->size + arc.mru->size;

	if (top_sz > arc.p && arc.mru->lsize > 0) {
		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
		(void) arc_evict(arc.mru, toevict, FALSE);
		top_sz = arc.anon->size + arc.mru->size;
	}

	mru_over = top_sz + arc.mru_ghost->size - arc.c;

	if (mru_over > 0) {
		if (arc.mru_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
			arc_evict_ghost(arc.mru_ghost, todelete);
		}
	}

	if ((arc_over = arc.size - arc.c) > 0) {
		int64_t tbl_over;

		if (arc.mfu->lsize > 0) {
			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
			(void) arc_evict(arc.mfu, toevict, FALSE);
		}

		tbl_over = arc.size + arc.mru_ghost->lsize +
		    arc.mfu_ghost->lsize - arc.c*2;

		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
			arc_evict_ghost(arc.mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc.mru->list))
		(void) arc_evict(arc.mru, -1, FALSE);
	while (list_head(&arc.mfu->list))
		(void) arc_evict(arc.mfu, -1, FALSE);

	arc_evict_ghost(arc.mru_ghost, -1);
	arc_evict_ghost(arc.mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_kmem_reclaim_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_kmem_reclaim(void)
{
	uint64_t to_free;

	/*
	 * We need arc_reclaim_lock because we don't want multiple
	 * threads trying to reclaim concurrently.
	 */

	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().  So we set a flag to prevent
	 * accessing the destroyed mutexes and lists.
	 */
	if (arc_dead)
		return;

	if (arc.c <= arc.c_min)
		return;

	mutex_enter(&arc_reclaim_lock);

#ifdef _KERNEL
	to_free = MAX(arc.c >> arc_kmem_reclaim_shift, ptob(needfree));
#else
	to_free = arc.c >> arc_kmem_reclaim_shift;
#endif
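	/*
	 * Worked example: with arc_kmem_reclaim_shift = 5 the shift
	 * frees arc.c / 32 (about 3%) of the target size per call,
	 * e.g. 32MB off a 1GB target, unless (in the kernel) the
	 * pageout deficit ptob(needfree) is larger.
	 */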
	if (arc.c > to_free)
		atomic_add_64(&arc.c, -to_free);
	else
		arc.c = arc.c_min;

	atomic_add_64(&arc.p, -(arc.p >> arc_kmem_reclaim_shift));
	if (arc.c > arc.size)
		arc.c = arc.size;
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;
	if (arc.p > arc.c)
		arc.p = (arc.c >> 1);
	ASSERT((int64_t)arc.p >= 0);

	arc_adjust();

	mutex_exit(&arc_reclaim_lock);
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];

#ifdef _KERNEL
	/*
	 * First purge some DNLC entries, in case the DNLC is using
	 * up too much memory.
	 */
	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_kmem_reclaim();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc.no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc.no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);
			ASSERT(growtime > 0);

			arc_kmem_reap_now(last_reclaim);

		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
			arc.no_grow = FALSE;
		}

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc.mru_ghost) {
		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));

		arc.p = MIN(arc.c, arc.p + bytes * mult);
	} else if (state == arc.mfu_ghost) {
		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));

		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
	}
	ASSERT((int64_t)arc.p >= 0);
1367789Sahrens 
1368789Sahrens 	if (arc_reclaim_needed()) {
1369789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1370789Sahrens 		return;
1371789Sahrens 	}
1372789Sahrens 
1373789Sahrens 	if (arc.no_grow)
1374789Sahrens 		return;
1375789Sahrens 
13761544Seschrock 	if (arc.c >= arc.c_max)
13771544Seschrock 		return;
13781544Seschrock 
1379789Sahrens 	/*
13801544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
13811544Seschrock 	 * cache size, increment the target cache size
1382789Sahrens 	 */
13831544Seschrock 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
13841544Seschrock 		atomic_add_64(&arc.c, (int64_t)bytes);
1385789Sahrens 		if (arc.c > arc.c_max)
1386789Sahrens 			arc.c = arc.c_max;
13871544Seschrock 		else if (state == arc.anon)
13881544Seschrock 			atomic_add_64(&arc.p, (int64_t)bytes);
13891544Seschrock 		if (arc.p > arc.c)
13901544Seschrock 			arc.p = arc.c;
1391789Sahrens 	}
13921544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1393789Sahrens }
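
/*
 * A worked example of the adaptation above (editorial, with made-up
 * sizes): suppose arc.c = 1GB, arc.p = 256MB, and a 128KB read hits
 * in the MRU ghost list while arc.mfu_ghost->size is four times
 * arc.mru_ghost->size.  Then:
 *
 *	mult  = arc.mfu_ghost->size / arc.mru_ghost->size = 4
 *	arc.p = MIN(arc.c, arc.p + 128KB * 4) = 256MB + 512KB
 *
 * The MRU target grows faster when its ghost list is the smaller of
 * the two, since hits there are the stronger hint that the split is
 * wrong; MFU ghost hits shrink arc.p symmetrically.
 */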
1394789Sahrens 
1395789Sahrens /*
13961544Seschrock  * Check if the cache has reached its limits and eviction is required
13971544Seschrock  * prior to insert.
1398789Sahrens  */
1399789Sahrens static int
1400789Sahrens arc_evict_needed()
1401789Sahrens {
1402789Sahrens 	if (arc_reclaim_needed())
1403789Sahrens 		return (1);
1404789Sahrens 
14051544Seschrock 	return (arc.size > arc.c);
1406789Sahrens }
1407789Sahrens 
1408789Sahrens /*
14092688Smaybee  * The buffer, supplied as the first argument, needs a data block.
14102688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
14112688Smaybee  * We have the following cases:
1412789Sahrens  *
14131544Seschrock  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1414789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1415789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1416789Sahrens  *
14171544Seschrock  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1418789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1419789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1420789Sahrens  * entries.
1421789Sahrens  *
14221544Seschrock  * 3. Insert for MFU, (c - p) > sizeof(arc.mfu) ->
1423789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1424789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1425789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1426789Sahrens  *
14271544Seschrock  * 4. Insert for MFU, (c - p) < sizeof(arc.mfu) ->
1428789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1429789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1430789Sahrens  */
1431789Sahrens static void
14322688Smaybee arc_get_data_buf(arc_buf_t *buf)
1433789Sahrens {
14342688Smaybee 	arc_state_t	*state = buf->b_hdr->b_state;
14352688Smaybee 	uint64_t	size = buf->b_hdr->b_size;
14362688Smaybee 
14372688Smaybee 	arc_adapt(size, state);
1438789Sahrens 
14392688Smaybee 	/*
14402688Smaybee 	 * We have not yet reached cache maximum size,
14412688Smaybee 	 * just allocate a new buffer.
14422688Smaybee 	 */
14432688Smaybee 	if (!arc_evict_needed()) {
14442688Smaybee 		buf->b_data = zio_buf_alloc(size);
14452688Smaybee 		atomic_add_64(&arc.size, size);
14462688Smaybee 		goto out;
14472688Smaybee 	}
14482688Smaybee 
14492688Smaybee 	/*
14502688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
14512688Smaybee 	 * will end up on the mru list; so steal space from there.
14522688Smaybee 	 */
14532688Smaybee 	if (state == arc.mfu_ghost)
14542688Smaybee 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
14552688Smaybee 	else if (state == arc.mru_ghost)
14562688Smaybee 		state = arc.mru;
1457789Sahrens 
14582688Smaybee 	if (state == arc.mru || state == arc.anon) {
14592688Smaybee 		uint64_t mru_used = arc.anon->size + arc.mru->size;
14602688Smaybee 		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
1461789Sahrens 	} else {
14622688Smaybee 		/* MFU cases */
14632688Smaybee 		uint64_t mfu_space = arc.c - arc.p;
14642688Smaybee 		state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
14652688Smaybee 	}
14662688Smaybee 	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
14672688Smaybee 		(void) arc_evict(state, size, FALSE);
14682688Smaybee 		buf->b_data = zio_buf_alloc(size);
14692688Smaybee 		atomic_add_64(&arc.size, size);
14702688Smaybee 		atomic_add_64(&arc.recycle_miss, 1);
14712688Smaybee 		if (arc.size > arc.c)
14722688Smaybee 			arc_adjust();
14732688Smaybee 	}
14742688Smaybee 	ASSERT(buf->b_data != NULL);
14752688Smaybee out:
14762688Smaybee 	/*
14772688Smaybee 	 * Update the state size.  Note that ghost states have a
14782688Smaybee 	 * "ghost size" and so don't need to be updated.
14792688Smaybee 	 */
14802688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
14812688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
14822688Smaybee 
14832688Smaybee 		atomic_add_64(&hdr->b_state->size, size);
14842688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
14852688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
14862688Smaybee 			atomic_add_64(&hdr->b_state->lsize, size);
1487789Sahrens 		}
1488789Sahrens 	}
1489789Sahrens }
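
/*
 * Editorial sketch tying the four cases above to the code: with
 * arc.c = 1GB and arc.p = 600MB (made-up numbers), an insert destined
 * for the MRU when arc.anon->size + arc.mru->size is already 700MB
 * falls into case 2 (p <= anon + mru), so the eviction victim is the
 * MRU itself:
 *
 *	uint64_t mru_used = arc.anon->size + arc.mru->size;
 *	state = (arc.p > mru_used) ? arc.mfu : arc.mru;
 *
 * The MFU cases mirror this with mfu_space = arc.c - arc.p.
 */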
1490789Sahrens 
1491789Sahrens /*
1492789Sahrens  * This routine is called whenever a buffer is accessed.
14931544Seschrock  * NOTE: the hash lock is dropped in this function.
1494789Sahrens  */
1495789Sahrens static void
14962688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1497789Sahrens {
1498789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1499789Sahrens 
1500789Sahrens 	if (buf->b_state == arc.anon) {
1501789Sahrens 		/*
1502789Sahrens 		 * This buffer is not in the cache, and does not
1503789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
1504789Sahrens 		 * to the MRU state.
1505789Sahrens 		 */
1506789Sahrens 
1507789Sahrens 		ASSERT(buf->b_arc_access == 0);
1508789Sahrens 		buf->b_arc_access = lbolt;
15091544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
15101544Seschrock 		arc_change_state(arc.mru, buf, hash_lock);
1511789Sahrens 
15121544Seschrock 	} else if (buf->b_state == arc.mru) {
1513789Sahrens 		/*
15142391Smaybee 		 * If this buffer is here because of a prefetch, then either:
15152391Smaybee 		 * - clear the flag if this is a "referencing" read
15162391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
15172391Smaybee 		 * or
15182391Smaybee 		 * - move the buffer to the head of the list if this is
15192391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
1520789Sahrens 		 */
1521789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
15222391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
15232391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
15242391Smaybee 				mutex_enter(&arc.mru->mtx);
15252391Smaybee 				list_remove(&arc.mru->list, buf);
15262391Smaybee 				list_insert_head(&arc.mru->list, buf);
15272391Smaybee 				mutex_exit(&arc.mru->mtx);
15282391Smaybee 			} else {
15292391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
15302391Smaybee 				atomic_add_64(&arc.mru->hits, 1);
15312391Smaybee 			}
15322391Smaybee 			buf->b_arc_access = lbolt;
1533789Sahrens 			return;
1534789Sahrens 		}
1535789Sahrens 
1536789Sahrens 		/*
1537789Sahrens 		 * This buffer has been "accessed" only once so far,
1538789Sahrens 		 * but it is still in the cache. Move it to the MFU
1539789Sahrens 		 * state.
1540789Sahrens 		 */
1541789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1542789Sahrens 			/*
1543789Sahrens 			 * More than 125ms have passed since we
1544789Sahrens 			 * instantiated this buffer.  Move it to the
1545789Sahrens 			 * most frequently used state.
1546789Sahrens 			 */
1547789Sahrens 			buf->b_arc_access = lbolt;
15481544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
15491544Seschrock 			arc_change_state(arc.mfu, buf, hash_lock);
1550789Sahrens 		}
15511544Seschrock 		atomic_add_64(&arc.mru->hits, 1);
15521544Seschrock 	} else if (buf->b_state == arc.mru_ghost) {
1553789Sahrens 		arc_state_t	*new_state;
1554789Sahrens 		/*
1555789Sahrens 		 * This buffer has been "accessed" recently, but
1556789Sahrens 		 * was evicted from the cache.  Move it to the
1557789Sahrens 		 * MFU state.
1558789Sahrens 		 */
1559789Sahrens 
1560789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
15611544Seschrock 			new_state = arc.mru;
15622391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
15632391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
15641544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1565789Sahrens 		} else {
15661544Seschrock 			new_state = arc.mfu;
15671544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1568789Sahrens 		}
1569789Sahrens 
1570789Sahrens 		buf->b_arc_access = lbolt;
1571789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1572789Sahrens 
15731544Seschrock 		atomic_add_64(&arc.mru_ghost->hits, 1);
15741544Seschrock 	} else if (buf->b_state == arc.mfu) {
1575789Sahrens 		/*
1576789Sahrens 		 * This buffer has been accessed more than once and is
1577789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1578789Sahrens 		 *
15792391Smaybee 		 * NOTE: an add_reference() that occurred when we did
15802391Smaybee 		 * the arc_read() will have kicked this off the list.
15812391Smaybee 		 * If it was a prefetch, we will explicitly move it to
15822391Smaybee 		 * the head of the list now.
1583789Sahrens 		 */
15842391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
15852391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
15862391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
15872391Smaybee 			mutex_enter(&arc.mfu->mtx);
15882391Smaybee 			list_remove(&arc.mfu->list, buf);
15892391Smaybee 			list_insert_head(&arc.mfu->list, buf);
15902391Smaybee 			mutex_exit(&arc.mfu->mtx);
15912391Smaybee 		}
15921544Seschrock 		atomic_add_64(&arc.mfu->hits, 1);
15932391Smaybee 		buf->b_arc_access = lbolt;
15941544Seschrock 	} else if (buf->b_state == arc.mfu_ghost) {
15952391Smaybee 		arc_state_t	*new_state = arc.mfu;
1596789Sahrens 		/*
1597789Sahrens 		 * This buffer has been accessed more than once but has
1598789Sahrens 		 * been evicted from the cache.  Move it back to the
1599789Sahrens 		 * MFU state.
1600789Sahrens 		 */
1601789Sahrens 
16022391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
16032391Smaybee 			/*
16042391Smaybee 			 * This is a prefetch access...
16052391Smaybee 			 * move this block back to the MRU state.
16062391Smaybee 			 */
16072391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
16082391Smaybee 			new_state = arc.mru;
16092391Smaybee 		}
16102391Smaybee 
1611789Sahrens 		buf->b_arc_access = lbolt;
16121544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
16132391Smaybee 		arc_change_state(new_state, buf, hash_lock);
1614789Sahrens 
16151544Seschrock 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1616789Sahrens 	} else {
1617789Sahrens 		ASSERT(!"invalid arc state");
1618789Sahrens 	}
1619789Sahrens }
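
/*
 * For reference, a summary of the transitions driven above (editorial
 * addition, derived from the code):
 *
 *	anon      -> mru		first access
 *	mru       -> mfu		re-access > ARC_MINTIME later
 *	mru_ghost -> mfu (mru if prefetch)
 *	mfu       -> mfu		stays; re-queued if prefetch
 *	mfu_ghost -> mfu (mru if prefetch)
 */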
1620789Sahrens 
1621789Sahrens /* a generic arc_done_func_t which you can use */
1622789Sahrens /* ARGSUSED */
1623789Sahrens void
1624789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1625789Sahrens {
1626789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
16271544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1628789Sahrens }
1629789Sahrens 
1630789Sahrens /* a generic arc_done_func_t which you can use */
1631789Sahrens void
1632789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1633789Sahrens {
1634789Sahrens 	arc_buf_t **bufp = arg;
1635789Sahrens 	if (zio && zio->io_error) {
16361544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1637789Sahrens 		*bufp = NULL;
1638789Sahrens 	} else {
1639789Sahrens 		*bufp = buf;
1640789Sahrens 	}
1641789Sahrens }
1642789Sahrens 
1643789Sahrens static void
1644789Sahrens arc_read_done(zio_t *zio)
1645789Sahrens {
16461589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1647789Sahrens 	arc_buf_t	*buf;
1648789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1649789Sahrens 	kmutex_t	*hash_lock;
1650789Sahrens 	arc_callback_t	*callback_list, *acb;
1651789Sahrens 	int		freeable = FALSE;
1652789Sahrens 
1653789Sahrens 	buf = zio->io_private;
1654789Sahrens 	hdr = buf->b_hdr;
1655789Sahrens 
16561589Smaybee 	/*
16571589Smaybee 	 * The hdr was inserted into the hash table and removed from lists
16581589Smaybee 	 * prior to starting I/O.  We should find this header, since
16591589Smaybee 	 * it's in the hash table, and it should be legit since it's
16601589Smaybee 	 * not possible to evict it during the I/O.  The only possible
16611589Smaybee 	 * reason for it not to be found is if we were freed during the
16621589Smaybee 	 * read.
16631589Smaybee 	 */
16641589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1665789Sahrens 		    &hash_lock);
1666789Sahrens 
16671589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
16681589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1669789Sahrens 
1670789Sahrens 	/* byteswap if necessary */
1671789Sahrens 	callback_list = hdr->b_acb;
1672789Sahrens 	ASSERT(callback_list != NULL);
1673789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1674789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1675789Sahrens 
1676789Sahrens 	/* create copies of the data buffer for the callers */
1677789Sahrens 	abuf = buf;
1678789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1679789Sahrens 		if (acb->acb_done) {
16802688Smaybee 			if (abuf == NULL)
16812688Smaybee 				abuf = arc_buf_clone(buf);
1682789Sahrens 			acb->acb_buf = abuf;
1683789Sahrens 			abuf = NULL;
1684789Sahrens 		}
1685789Sahrens 	}
1686789Sahrens 	hdr->b_acb = NULL;
1687789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
16881544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
16891544Seschrock 	if (abuf == buf)
16901544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1691789Sahrens 
1692789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1693789Sahrens 
1694789Sahrens 	if (zio->io_error != 0) {
1695789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1696789Sahrens 		if (hdr->b_state != arc.anon)
1697789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
16981544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
16991544Seschrock 			buf_hash_remove(hdr);
1700789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
17012391Smaybee 		/* convert checksum errors into IO errors */
17021544Seschrock 		if (zio->io_error == ECKSUM)
17031544Seschrock 			zio->io_error = EIO;
1704789Sahrens 	}
1705789Sahrens 
17061544Seschrock 	/*
17072391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
17082391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
17092391Smaybee 	 * the cv_broadcast().
17101544Seschrock 	 */
17111544Seschrock 	cv_broadcast(&hdr->b_cv);
17121544Seschrock 
17131589Smaybee 	if (hash_lock) {
1714789Sahrens 		/*
1715789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1716789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1717789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1718789Sahrens 		 * getting confused).
1719789Sahrens 		 */
1720789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
17212688Smaybee 			arc_access(hdr, hash_lock);
17222688Smaybee 		mutex_exit(hash_lock);
1723789Sahrens 	} else {
1724789Sahrens 		/*
1725789Sahrens 		 * This block was freed while we waited for the read to
1726789Sahrens 		 * complete.  It has been removed from the hash table and
1727789Sahrens 		 * moved to the anonymous state (so that it won't show up
1728789Sahrens 		 * in the cache).
1729789Sahrens 		 */
1730789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1731789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1732789Sahrens 	}
1733789Sahrens 
1734789Sahrens 	/* execute each callback and free its structure */
1735789Sahrens 	while ((acb = callback_list) != NULL) {
1736789Sahrens 		if (acb->acb_done)
1737789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1738789Sahrens 
1739789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1740789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1741789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1742789Sahrens 		}
1743789Sahrens 
1744789Sahrens 		callback_list = acb->acb_next;
1745789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1746789Sahrens 	}
1747789Sahrens 
1748789Sahrens 	if (freeable)
17491544Seschrock 		arc_hdr_destroy(hdr);
1750789Sahrens }
1751789Sahrens 
1752789Sahrens /*
1753789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1754789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1755789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1756789Sahrens  * in the callback will be NULL in this case, since no IO was
1757789Sahrens  * required.  If the block is not in the cache pass the read request
1758789Sahrens  * on to the spa with a substitute callback function, so that the
1759789Sahrens  * requested block will be added to the cache.
1760789Sahrens  *
1761789Sahrens  * If a read request arrives for a block that has a read in-progress,
1762789Sahrens  * either wait for the in-progress read to complete (and return the
1763789Sahrens  * results); or, if this is a read with a "done" func, add a record
1764789Sahrens  * to the read to invoke the "done" func when the read completes,
1765789Sahrens  * and return; or just return.
1766789Sahrens  *
1767789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1768789Sahrens  * for readers of this block.
1769789Sahrens  */
1770789Sahrens int
1771789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1772789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
17732391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
1774789Sahrens {
1775789Sahrens 	arc_buf_hdr_t *hdr;
1776789Sahrens 	arc_buf_t *buf;
1777789Sahrens 	kmutex_t *hash_lock;
1778789Sahrens 	zio_t	*rzio;
1779789Sahrens 
1780789Sahrens top:
1781789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
17821544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
1783789Sahrens 
17842391Smaybee 		*arc_flags |= ARC_CACHED;
17852391Smaybee 
1786789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
17872391Smaybee 
17882391Smaybee 			if (*arc_flags & ARC_WAIT) {
17892391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
17902391Smaybee 				mutex_exit(hash_lock);
17912391Smaybee 				goto top;
17922391Smaybee 			}
17932391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
17942391Smaybee 
17952391Smaybee 			if (done) {
1796789Sahrens 				arc_callback_t	*acb = NULL;
1797789Sahrens 
1798789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1799789Sahrens 				    KM_SLEEP);
1800789Sahrens 				acb->acb_done = done;
1801789Sahrens 				acb->acb_private = private;
1802789Sahrens 				acb->acb_byteswap = swap;
1803789Sahrens 				if (pio != NULL)
1804789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1805789Sahrens 					    spa, NULL, NULL, flags);
1806789Sahrens 
1807789Sahrens 				ASSERT(acb->acb_done != NULL);
1808789Sahrens 				acb->acb_next = hdr->b_acb;
1809789Sahrens 				hdr->b_acb = acb;
1810789Sahrens 				add_reference(hdr, hash_lock, private);
1811789Sahrens 				mutex_exit(hash_lock);
1812789Sahrens 				return (0);
1813789Sahrens 			}
1814789Sahrens 			mutex_exit(hash_lock);
1815789Sahrens 			return (0);
1816789Sahrens 		}
1817789Sahrens 
18181544Seschrock 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1819789Sahrens 
18201544Seschrock 		if (done) {
18212688Smaybee 			add_reference(hdr, hash_lock, private);
18221544Seschrock 			/*
18231544Seschrock 			 * If this block is already in use, create a new
18241544Seschrock 			 * copy of the data so that we will be guaranteed
18251544Seschrock 			 * that arc_release() will always succeed.
18261544Seschrock 			 */
18271544Seschrock 			buf = hdr->b_buf;
18281544Seschrock 			ASSERT(buf);
18291544Seschrock 			ASSERT(buf->b_data);
18302688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
18311544Seschrock 				ASSERT(buf->b_efunc == NULL);
18321544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
18332688Smaybee 			} else {
18342688Smaybee 				buf = arc_buf_clone(buf);
18351544Seschrock 			}
18362391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
18372391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
18382391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
1839789Sahrens 		}
1840789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
18412688Smaybee 		arc_access(hdr, hash_lock);
18422688Smaybee 		mutex_exit(hash_lock);
1843789Sahrens 		atomic_add_64(&arc.hits, 1);
1844789Sahrens 		if (done)
1845789Sahrens 			done(NULL, buf, private);
1846789Sahrens 	} else {
1847789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1848789Sahrens 		arc_callback_t	*acb;
1849789Sahrens 
1850789Sahrens 		if (hdr == NULL) {
1851789Sahrens 			/* this block is not in the cache */
1852789Sahrens 			arc_buf_hdr_t	*exists;
1853789Sahrens 
1854789Sahrens 			buf = arc_buf_alloc(spa, size, private);
1855789Sahrens 			hdr = buf->b_hdr;
1856789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1857789Sahrens 			hdr->b_birth = bp->blk_birth;
1858789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1859789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1860789Sahrens 			if (exists) {
1861789Sahrens 				/* somebody beat us to the hash insert */
1862789Sahrens 				mutex_exit(hash_lock);
1863789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1864789Sahrens 				hdr->b_birth = 0;
1865789Sahrens 				hdr->b_cksum0 = 0;
18661544Seschrock 				(void) arc_buf_remove_ref(buf, private);
1867789Sahrens 				goto top; /* restart the IO request */
1868789Sahrens 			}
18692391Smaybee 			/* if this is a prefetch, we don't have a reference */
18702391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
18712391Smaybee 				(void) remove_reference(hdr, hash_lock,
18722391Smaybee 				    private);
18732391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
18742391Smaybee 			}
18752391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
18762391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
1877789Sahrens 		} else {
1878789Sahrens 			/* this block is in the ghost cache */
18791544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
18801544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
18812391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
18822391Smaybee 			ASSERT(hdr->b_buf == NULL);
1883789Sahrens 
18842391Smaybee 			/* if this is a prefetch, we don't have a reference */
18852391Smaybee 			if (*arc_flags & ARC_PREFETCH)
18862391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
18872391Smaybee 			else
18882391Smaybee 				add_reference(hdr, hash_lock, private);
1889789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
18901544Seschrock 			buf->b_hdr = hdr;
18912688Smaybee 			buf->b_data = NULL;
18921544Seschrock 			buf->b_efunc = NULL;
18931544Seschrock 			buf->b_private = NULL;
18941544Seschrock 			buf->b_next = NULL;
18951544Seschrock 			hdr->b_buf = buf;
18962688Smaybee 			arc_get_data_buf(buf);
18971544Seschrock 			ASSERT(hdr->b_datacnt == 0);
18981544Seschrock 			hdr->b_datacnt = 1;
18992391Smaybee 
1900789Sahrens 		}
1901789Sahrens 
1902789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1903789Sahrens 		acb->acb_done = done;
1904789Sahrens 		acb->acb_private = private;
1905789Sahrens 		acb->acb_byteswap = swap;
1906789Sahrens 
1907789Sahrens 		ASSERT(hdr->b_acb == NULL);
1908789Sahrens 		hdr->b_acb = acb;
1909789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1910789Sahrens 
1911789Sahrens 		/*
1912789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
1913789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
1914789Sahrens 		 * the header will be marked as I/O in progress and have an
1915789Sahrens 		 * attached buffer.  At this point, anybody who finds this
1916789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
1917789Sahrens 		 */
1918789Sahrens 
19191544Seschrock 		if (GHOST_STATE(hdr->b_state))
19202688Smaybee 			arc_access(hdr, hash_lock);
19212688Smaybee 		mutex_exit(hash_lock);
1922789Sahrens 
1923789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
19241596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
19251596Sahrens 		    zbookmark_t *, zb);
1926789Sahrens 		atomic_add_64(&arc.misses, 1);
19271544Seschrock 
1928789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
19291544Seschrock 		    arc_read_done, buf, priority, flags, zb);
1930789Sahrens 
19312391Smaybee 		if (*arc_flags & ARC_WAIT)
1932789Sahrens 			return (zio_wait(rzio));
1933789Sahrens 
19342391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
1935789Sahrens 		zio_nowait(rzio);
1936789Sahrens 	}
1937789Sahrens 	return (0);
1938789Sahrens }
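
/*
 * Hedged usage sketch (hypothetical caller, byteswap omitted for
 * simplicity): a synchronous cached read passes ARC_WAIT and collects
 * the buffer through arc_getbuf_func() above.
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, NULL, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (abuf != NULL) {
 *		... consume abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */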
1939789Sahrens 
1940789Sahrens /*
1941789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
1942789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
1943789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
1944789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1945789Sahrens  */
1946789Sahrens int
1947789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1948789Sahrens {
1949789Sahrens 	arc_buf_hdr_t *hdr;
1950789Sahrens 	kmutex_t *hash_mtx;
1951789Sahrens 	int rc = 0;
1952789Sahrens 
1953789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1954789Sahrens 
19551544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
19561544Seschrock 		arc_buf_t *buf = hdr->b_buf;
19571544Seschrock 
19581544Seschrock 		ASSERT(buf);
19591544Seschrock 		while (buf->b_data == NULL) {
19601544Seschrock 			buf = buf->b_next;
19611544Seschrock 			ASSERT(buf);
19621544Seschrock 		}
19631544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
19641544Seschrock 	} else {
1965789Sahrens 		rc = ENOENT;
19661544Seschrock 	}
1967789Sahrens 
1968789Sahrens 	if (hash_mtx)
1969789Sahrens 		mutex_exit(hash_mtx);
1970789Sahrens 
1971789Sahrens 	return (rc);
1972789Sahrens }
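
/*
 * Hedged usage sketch (hypothetical traversal caller): the caller
 * supplies its own buffer and falls back to issuing the I/O itself
 * when arc_tryread() misses.
 *
 *	void *data = zio_buf_alloc(BP_GET_LSIZE(bp));
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT) {
 *		... do the read without caching it ...
 *	}
 */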
1973789Sahrens 
19741544Seschrock void
19751544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
19761544Seschrock {
19771544Seschrock 	ASSERT(buf->b_hdr != NULL);
19781544Seschrock 	ASSERT(buf->b_hdr->b_state != arc.anon);
19791544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
19801544Seschrock 	buf->b_efunc = func;
19811544Seschrock 	buf->b_private = private;
19821544Seschrock }
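
/*
 * Illustrative sketch (hypothetical consumer, not from the original
 * source): a DMU-style user registers an evict callback so it can
 * drop its own bookkeeping when the ARC reclaims the buffer.  The
 * callback receives only the arc_buf_t; the private pointer comes
 * back through buf->b_private.
 *
 *	static int
 *	my_evict_func(arc_buf_t *buf)
 *	{
 *		my_state_t *ms = buf->b_private;
 *
 *		... detach ms from buf here, then:
 *		return (0);
 *	}
 *
 *	arc_set_callback(buf, my_evict_func, ms);
 */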
19831544Seschrock 
19841544Seschrock /*
19851544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
19861544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
19871544Seschrock  * is not yet in the evicted state, it will be put there.
19881544Seschrock  */
19891544Seschrock int
19901544Seschrock arc_buf_evict(arc_buf_t *buf)
19911544Seschrock {
19922724Smaybee 	arc_buf_hdr_t *hdr = buf->b_hdr;
19931544Seschrock 	kmutex_t *hash_lock;
19941544Seschrock 	arc_buf_t **bufp;
19951544Seschrock 
19961544Seschrock 	if (hdr == NULL) {
19971544Seschrock 		/*
19981544Seschrock 		 * We are in arc_do_user_evicts().
19991544Seschrock 		 */
20001544Seschrock 		ASSERT(buf->b_data == NULL);
20011544Seschrock 		return (0);
20021544Seschrock 	}
20031544Seschrock 
20041544Seschrock 	hash_lock = HDR_LOCK(hdr);
20051544Seschrock 	mutex_enter(hash_lock);
20061544Seschrock 
20072724Smaybee 	if (buf->b_data == NULL) {
20082724Smaybee 		/*
20092724Smaybee 		 * We are on the eviction list.
20102724Smaybee 		 */
20112724Smaybee 		mutex_exit(hash_lock);
20122724Smaybee 		mutex_enter(&arc_eviction_mtx);
20132724Smaybee 		if (buf->b_hdr == NULL) {
20142724Smaybee 			/*
20152724Smaybee 			 * We are already in arc_do_user_evicts().
20162724Smaybee 			 */
20172724Smaybee 			mutex_exit(&arc_eviction_mtx);
20182724Smaybee 			return (0);
20192724Smaybee 		} else {
20202724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
20212724Smaybee 			/*
20222724Smaybee 			 * Process this buffer now
20232724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
20242724Smaybee 			 */
20252724Smaybee 			buf->b_efunc = NULL;
20262724Smaybee 			mutex_exit(&arc_eviction_mtx);
20272724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
20282724Smaybee 			return (1);
20292724Smaybee 		}
20302724Smaybee 	}
20312724Smaybee 
20322724Smaybee 	ASSERT(buf->b_hdr == hdr);
20332724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
20341544Seschrock 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
20351544Seschrock 
20361544Seschrock 	/*
20371544Seschrock 	 * Pull this buffer off of the hdr
20381544Seschrock 	 */
20391544Seschrock 	bufp = &hdr->b_buf;
20401544Seschrock 	while (*bufp != buf)
20411544Seschrock 		bufp = &(*bufp)->b_next;
20421544Seschrock 	*bufp = buf->b_next;
20431544Seschrock 
20441544Seschrock 	ASSERT(buf->b_data != NULL);
20451544Seschrock 	buf->b_hdr = hdr;
20462688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
20471544Seschrock 
20481544Seschrock 	if (hdr->b_datacnt == 0) {
20491544Seschrock 		arc_state_t *old_state = hdr->b_state;
20501544Seschrock 		arc_state_t *evicted_state;
20511544Seschrock 
20521544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
20531544Seschrock 
20541544Seschrock 		evicted_state =
20551544Seschrock 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
20561544Seschrock 
20571544Seschrock 		mutex_enter(&old_state->mtx);
20581544Seschrock 		mutex_enter(&evicted_state->mtx);
20591544Seschrock 
20601544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
20611544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
20621544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
20631544Seschrock 
20641544Seschrock 		mutex_exit(&evicted_state->mtx);
20651544Seschrock 		mutex_exit(&old_state->mtx);
20661544Seschrock 	}
20671544Seschrock 	mutex_exit(hash_lock);
20681819Smaybee 
20691544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
20701544Seschrock 	buf->b_efunc = NULL;
20711544Seschrock 	buf->b_private = NULL;
20721544Seschrock 	buf->b_hdr = NULL;
20731544Seschrock 	kmem_cache_free(buf_cache, buf);
20741544Seschrock 	return (1);
20751544Seschrock }
20761544Seschrock 
2077789Sahrens /*
2078789Sahrens  * Release this buffer from the cache.  This must be done
2079789Sahrens  * after a read and prior to modifying the buffer contents.
2080789Sahrens  * If the buffer has more than one reference, we must make
2081789Sahrens  * a new hdr for the buffer.
2082789Sahrens  */
2083789Sahrens void
2084789Sahrens arc_release(arc_buf_t *buf, void *tag)
2085789Sahrens {
2086789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2087789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2088789Sahrens 
2089789Sahrens 	/* this buffer is not on any list */
2090789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2091789Sahrens 
2092789Sahrens 	if (hdr->b_state == arc.anon) {
2093789Sahrens 		/* this buffer is already released */
2094789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2095789Sahrens 		ASSERT(BUF_EMPTY(hdr));
20961544Seschrock 		ASSERT(buf->b_efunc == NULL);
2097789Sahrens 		return;
2098789Sahrens 	}
2099789Sahrens 
2100789Sahrens 	mutex_enter(hash_lock);
2101789Sahrens 
21021544Seschrock 	/*
21031544Seschrock 	 * Do we have more than one buf?
21041544Seschrock 	 */
21051544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2106789Sahrens 		arc_buf_hdr_t *nhdr;
2107789Sahrens 		arc_buf_t **bufp;
2108789Sahrens 		uint64_t blksz = hdr->b_size;
2109789Sahrens 		spa_t *spa = hdr->b_spa;
2110789Sahrens 
21111544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2112789Sahrens 		/*
2113789Sahrens 		 * Pull the data off of this buf and attach it to
2114789Sahrens 		 * a new anonymous buf.
2115789Sahrens 		 */
21161544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2117789Sahrens 		bufp = &hdr->b_buf;
21181544Seschrock 		while (*bufp != buf)
2119789Sahrens 			bufp = &(*bufp)->b_next;
2120789Sahrens 		*bufp = (*bufp)->b_next;
21211544Seschrock 
2122789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2123789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
21241544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
21251544Seschrock 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
21261544Seschrock 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
21271544Seschrock 		}
21281544Seschrock 		hdr->b_datacnt -= 1;
21291544Seschrock 
2130789Sahrens 		mutex_exit(hash_lock);
2131789Sahrens 
2132789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2133789Sahrens 		nhdr->b_size = blksz;
2134789Sahrens 		nhdr->b_spa = spa;
2135789Sahrens 		nhdr->b_buf = buf;
2136789Sahrens 		nhdr->b_state = arc.anon;
2137789Sahrens 		nhdr->b_arc_access = 0;
2138789Sahrens 		nhdr->b_flags = 0;
21391544Seschrock 		nhdr->b_datacnt = 1;
2140789Sahrens 		buf->b_hdr = nhdr;
2141789Sahrens 		buf->b_next = NULL;
2142789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2143789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
2144789Sahrens 
2145789Sahrens 		hdr = nhdr;
2146789Sahrens 	} else {
21471544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2148789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2149789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2150789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
2151789Sahrens 		hdr->b_arc_access = 0;
2152789Sahrens 		mutex_exit(hash_lock);
2153789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2154789Sahrens 		hdr->b_birth = 0;
2155789Sahrens 		hdr->b_cksum0 = 0;
2156789Sahrens 	}
21571544Seschrock 	buf->b_efunc = NULL;
21581544Seschrock 	buf->b_private = NULL;
2159789Sahrens }
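
/*
 * Illustrative call pattern (editorial): a writer that obtained buf
 * via arc_read() must release it before dirtying the data; afterwards
 * the buffer is anonymous and safe to modify in place.
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	... scribble on buf->b_data ...
 */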
2160789Sahrens 
2161789Sahrens int
2162789Sahrens arc_released(arc_buf_t *buf)
2163789Sahrens {
21641544Seschrock 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
21651544Seschrock }
21661544Seschrock 
21671544Seschrock int
21681544Seschrock arc_has_callback(arc_buf_t *buf)
21691544Seschrock {
21701544Seschrock 	return (buf->b_efunc != NULL);
2171789Sahrens }
2172789Sahrens 
21731544Seschrock #ifdef ZFS_DEBUG
21741544Seschrock int
21751544Seschrock arc_referenced(arc_buf_t *buf)
21761544Seschrock {
21771544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
21781544Seschrock }
21791544Seschrock #endif
21801544Seschrock 
2181789Sahrens static void
2182789Sahrens arc_write_done(zio_t *zio)
2183789Sahrens {
2184789Sahrens 	arc_buf_t *buf;
2185789Sahrens 	arc_buf_hdr_t *hdr;
2186789Sahrens 	arc_callback_t *acb;
2187789Sahrens 
2188789Sahrens 	buf = zio->io_private;
2189789Sahrens 	hdr = buf->b_hdr;
2190789Sahrens 	acb = hdr->b_acb;
2191789Sahrens 	hdr->b_acb = NULL;
21921544Seschrock 	ASSERT(acb != NULL);
2193789Sahrens 
2194789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2195789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2196789Sahrens 
2197789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2198789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2199789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
22001544Seschrock 	/*
22011544Seschrock 	 * If the block to be written was all-zero, we may have
22021544Seschrock 	 * compressed it away.  In this case no write was performed
22031544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
22041544Seschrock 	 * must therefore remain anonymous (and uncached).
22051544Seschrock 	 */
2206789Sahrens 	if (!BUF_EMPTY(hdr)) {
2207789Sahrens 		arc_buf_hdr_t *exists;
2208789Sahrens 		kmutex_t *hash_lock;
2209789Sahrens 
2210789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2211789Sahrens 		if (exists) {
2212789Sahrens 			/*
2213789Sahrens 			 * This can only happen if we overwrite for
2214789Sahrens 			 * sync-to-convergence, because we remove
2215789Sahrens 			 * buffers from the hash table when we arc_free().
2216789Sahrens 			 */
2217789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2218789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2219789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2220789Sahrens 			    zio->io_bp->blk_birth);
2221789Sahrens 
2222789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2223789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
2224789Sahrens 			mutex_exit(hash_lock);
22251544Seschrock 			arc_hdr_destroy(exists);
2226789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2227789Sahrens 			ASSERT3P(exists, ==, NULL);
2228789Sahrens 		}
22291544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
22302688Smaybee 		arc_access(hdr, hash_lock);
22312688Smaybee 		mutex_exit(hash_lock);
22321544Seschrock 	} else if (acb->acb_done == NULL) {
22331544Seschrock 		int destroy_hdr;
22341544Seschrock 		/*
22351544Seschrock 		 * This is an anonymous buffer with no user callback,
22361544Seschrock 		 * destroy it if there are no active references.
22371544Seschrock 		 */
22381544Seschrock 		mutex_enter(&arc_eviction_mtx);
22391544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
22401544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
22411544Seschrock 		mutex_exit(&arc_eviction_mtx);
22421544Seschrock 		if (destroy_hdr)
22431544Seschrock 			arc_hdr_destroy(hdr);
22441544Seschrock 	} else {
22451544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2246789Sahrens 	}
22471544Seschrock 
22481544Seschrock 	if (acb->acb_done) {
2249789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2250789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2251789Sahrens 	}
2252789Sahrens 
22531544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2254789Sahrens }
2255789Sahrens 
2256789Sahrens int
22571775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2258789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2259789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
22601544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2261789Sahrens {
2262789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2263789Sahrens 	arc_callback_t	*acb;
2264789Sahrens 	zio_t	*rzio;
2265789Sahrens 
2266789Sahrens 	/* this is a private buffer - no locking required */
2267789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2268789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2269789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
22702237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
22712237Smaybee 	ASSERT(hdr->b_acb == 0);
2272789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2273789Sahrens 	acb->acb_done = done;
2274789Sahrens 	acb->acb_private = private;
2275789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2276789Sahrens 	hdr->b_acb = acb;
22771544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
22781775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
22791544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2280789Sahrens 
2281789Sahrens 	if (arc_flags & ARC_WAIT)
2282789Sahrens 		return (zio_wait(rzio));
2283789Sahrens 
2284789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2285789Sahrens 	zio_nowait(rzio);
2286789Sahrens 
2287789Sahrens 	return (0);
2288789Sahrens }
2289789Sahrens 
2290789Sahrens int
2291789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2292789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2293789Sahrens {
2294789Sahrens 	arc_buf_hdr_t *ab;
2295789Sahrens 	kmutex_t *hash_lock;
2296789Sahrens 	zio_t	*zio;
2297789Sahrens 
2298789Sahrens 	/*
2299789Sahrens 	 * If this buffer is in the cache, release it, so it
2300789Sahrens 	 * can be re-used.
2301789Sahrens 	 */
2302789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2303789Sahrens 	if (ab != NULL) {
2304789Sahrens 		/*
2305789Sahrens 		 * The checksum of blocks to free is not always
2306789Sahrens 		 * preserved (eg. on the deadlist).  However, if it is
2307789Sahrens 		 * nonzero, it should match what we have in the cache.
2308789Sahrens 		 */
2309789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2310789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
23111990Smaybee 		if (ab->b_state != arc.anon)
23121990Smaybee 			arc_change_state(arc.anon, ab, hash_lock);
23132391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
23142391Smaybee 			/*
23152391Smaybee 			 * This should only happen when we prefetch.
23162391Smaybee 			 */
23172391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
23182391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
23192391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
23202391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
23212391Smaybee 				buf_hash_remove(ab);
23222391Smaybee 			ab->b_arc_access = 0;
23232391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
23242391Smaybee 			ab->b_birth = 0;
23252391Smaybee 			ab->b_cksum0 = 0;
23262391Smaybee 			ab->b_buf->b_efunc = NULL;
23272391Smaybee 			ab->b_buf->b_private = NULL;
23282391Smaybee 			mutex_exit(hash_lock);
23292391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2330789Sahrens 			mutex_exit(hash_lock);
23311544Seschrock 			arc_hdr_destroy(ab);
2332789Sahrens 			atomic_add_64(&arc.deleted, 1);
2333789Sahrens 		} else {
23341589Smaybee 			/*
23352391Smaybee 			 * We still have an active reference on this
23362391Smaybee 			 * buffer.  This can happen, e.g., from
23372391Smaybee 			 * dbuf_unoverride().
23381589Smaybee 			 */
23392391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2340789Sahrens 			ab->b_arc_access = 0;
2341789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2342789Sahrens 			ab->b_birth = 0;
2343789Sahrens 			ab->b_cksum0 = 0;
23441544Seschrock 			ab->b_buf->b_efunc = NULL;
23451544Seschrock 			ab->b_buf->b_private = NULL;
2346789Sahrens 			mutex_exit(hash_lock);
2347789Sahrens 		}
2348789Sahrens 	}
2349789Sahrens 
2350789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2351789Sahrens 
2352789Sahrens 	if (arc_flags & ARC_WAIT)
2353789Sahrens 		return (zio_wait(zio));
2354789Sahrens 
2355789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2356789Sahrens 	zio_nowait(zio);
2357789Sahrens 
2358789Sahrens 	return (0);
2359789Sahrens }
2360789Sahrens 
2361789Sahrens void
2362789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2363789Sahrens {
2364789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2365789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2366789Sahrens }
2367789Sahrens 
2368789Sahrens int
2369789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2370789Sahrens {
2371789Sahrens #ifdef ZFS_DEBUG
2372789Sahrens 	/*
2373789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2374789Sahrens 	 */
2375789Sahrens 	if (spa_get_random(10000) == 0) {
2376789Sahrens 		dprintf("forcing random failure\n");
2377789Sahrens 		return (ERESTART);
2378789Sahrens 	}
2379789Sahrens #endif
2380982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
2381982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
2382982Smaybee 	if (tempreserve > arc.c)
2383982Smaybee 		return (ENOMEM);
2384982Smaybee 
2385789Sahrens 	/*
2386982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2387982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2388982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2389982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2390982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2391982Smaybee 	 *
2392982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2393982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
2394789Sahrens 	 */
2395789Sahrens 
2396982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2397982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2398789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2399789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
2400789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2401789Sahrens 		    tempreserve>>10, arc.c>>10);
2402789Sahrens 		return (ERESTART);
2403789Sahrens 	}
2404789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2405789Sahrens 	return (0);
2406789Sahrens }
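
/*
 * Worked example of the throttle above (editorial, made-up numbers):
 * with arc.c = 1GB, a reservation fails with ERESTART once
 *
 *	tempreserve + arc_tempreserve + arc.anon->size > 512MB	(c/2)
 *	and	     arc_tempreserve + arc.anon->size > 256MB	(c/4)
 *
 * i.e. once roughly a quarter of the cache is already dirty and the
 * new reservation would push the dirty total past half.
 */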
2407789Sahrens 
2408789Sahrens void
2409789Sahrens arc_init(void)
2410789Sahrens {
2411789Sahrens 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
2412789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2413789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2414789Sahrens 
24152391Smaybee 	/* Convert seconds to clock ticks */
24162638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
24172391Smaybee 
2418789Sahrens 	/* Start out with 1/8 of all memory */
2419789Sahrens 	arc.c = physmem * PAGESIZE / 8;
2420789Sahrens 
2421789Sahrens #ifdef _KERNEL
2422789Sahrens 	/*
2423789Sahrens 	 * On architectures where the physical memory can be larger
2424789Sahrens 	 * than the addressable space (intel in 32-bit mode), we may
2425789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2426789Sahrens 	 */
2427789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2428789Sahrens #endif
2429789Sahrens 
2430982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2431789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
2432982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2433789Sahrens 	if (arc.c * 8 >= 1<<30)
2434789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
2435789Sahrens 	else
2436789Sahrens 		arc.c_max = arc.c_min;
2437789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
2438*2885Sahrens 
2439*2885Sahrens 	/*
2440*2885Sahrens 	 * Allow the tunables to override our calculations if they are
2441*2885Sahrens 	 * reasonable (i.e. over 64MB)
2442*2885Sahrens 	 */
2443*2885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
2444*2885Sahrens 		arc.c_max = zfs_arc_max;
2445*2885Sahrens 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
2446*2885Sahrens 		arc.c_min = zfs_arc_min;
2447*2885Sahrens 
2448789Sahrens 	arc.c = arc.c_max;
2449789Sahrens 	arc.p = (arc.c >> 1);
2450789Sahrens 
2451789Sahrens 	/* if kmem_flags are set, lets try to use less memory */
2452789Sahrens 	if (kmem_debugging())
2453789Sahrens 		arc.c = arc.c / 2;
2454789Sahrens 	if (arc.c < arc.c_min)
2455789Sahrens 		arc.c = arc.c_min;
2456789Sahrens 
2457789Sahrens 	arc.anon = &ARC_anon;
24581544Seschrock 	arc.mru = &ARC_mru;
24591544Seschrock 	arc.mru_ghost = &ARC_mru_ghost;
24601544Seschrock 	arc.mfu = &ARC_mfu;
24611544Seschrock 	arc.mfu_ghost = &ARC_mfu_ghost;
24621544Seschrock 	arc.size = 0;
2463789Sahrens 
24642688Smaybee 	arc.hits = 0;
24652688Smaybee 	arc.recycle_miss = 0;
24662688Smaybee 	arc.evict_skip = 0;
24672688Smaybee 	arc.mutex_miss = 0;
24682688Smaybee 
24692856Snd150628 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
24702856Snd150628 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
24712856Snd150628 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
24722856Snd150628 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
24732856Snd150628 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
24742856Snd150628 
24751544Seschrock 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2476789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
24771544Seschrock 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2478789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
24791544Seschrock 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2480789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
24811544Seschrock 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2482789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2483789Sahrens 
2484789Sahrens 	buf_init();
2485789Sahrens 
2486789Sahrens 	arc_thread_exit = 0;
24871544Seschrock 	arc_eviction_list = NULL;
24881544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
2489789Sahrens 
2490789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2491789Sahrens 	    TS_RUN, minclsyspri);
2492789Sahrens }
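
/*
 * The zfs_arc_max/zfs_arc_min tunables consulted above can be set
 * from /etc/system; for example (values are illustrative only, and
 * must exceed 64MB to pass the sanity checks):
 *
 *	set zfs:zfs_arc_max = 0x40000000	(cap the ARC at 1GB)
 *	set zfs:zfs_arc_min = 0x10000000	(floor of 256MB)
 */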
2493789Sahrens 
2494789Sahrens void
2495789Sahrens arc_fini(void)
2496789Sahrens {
2497789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2498789Sahrens 	arc_thread_exit = 1;
2499789Sahrens 	while (arc_thread_exit != 0)
2500789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2501789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2502789Sahrens 
2503789Sahrens 	arc_flush();
2504789Sahrens 
2505789Sahrens 	arc_dead = TRUE;
2506789Sahrens 
25071544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2508789Sahrens 	mutex_destroy(&arc_reclaim_lock);
2509789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2510789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2511789Sahrens 
25121544Seschrock 	list_destroy(&arc.mru->list);
25131544Seschrock 	list_destroy(&arc.mru_ghost->list);
25141544Seschrock 	list_destroy(&arc.mfu->list);
25151544Seschrock 	list_destroy(&arc.mfu_ghost->list);
2516789Sahrens 
25172856Snd150628 	mutex_destroy(&arc.anon->mtx);
25182856Snd150628 	mutex_destroy(&arc.mru->mtx);
25192856Snd150628 	mutex_destroy(&arc.mru_ghost->mtx);
25202856Snd150628 	mutex_destroy(&arc.mfu->mtx);
25212856Snd150628 	mutex_destroy(&arc.mfu_ghost->mtx);
25222856Snd150628 
2523789Sahrens 	buf_fini();
2524789Sahrens }
2525