xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 1936)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
221484Sek110237  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29789Sahrens  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  * Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  * the eviction algorithm simple: evict the last page in the list.
39789Sahrens  * This also makes the performance characteristics easy to reason
40789Sahrens  * about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  * subset of the blocks in the cache are un-evictable because we
42789Sahrens  * have handed out a reference to them.  Blocks are only evictable
43789Sahrens  * when there are no external references active.  This makes
44789Sahrens  * eviction far more problematic:  we choose to evict the evictable
45789Sahrens  * blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  * There are times when it is not possible to evict the requested
48789Sahrens  * space.  In these circumstances we are unable to adjust the cache
49789Sahrens  * size.  To prevent the cache from growing unbounded at these times we
50789Sahrens  * implement a "cache throttle" that slows the flow of new data
51789Sahrens  * into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  * Pages are evicted when the cache is full and there is a cache
55789Sahrens  * miss.  Our model has a variable-sized cache.  It grows with
56789Sahrens  * high use, but also tries to react to memory pressure from the
57789Sahrens  * operating system: decreasing its size when system memory is
58789Sahrens  * tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  * elements of the cache are therefore exactly the same size.  So
62789Sahrens  * when adjusting the cache size following a cache miss, it's simply
63789Sahrens  * a matter of choosing a single page to evict.  In our model, we
64789Sahrens  * have variable-sized cache blocks (ranging from 512 bytes to
65789Sahrens  * 128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  * space for a cache miss that approximates as closely as possible
67789Sahrens  * the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
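
/*
 * Editorial note (not part of the original source): a short worked example
 * of point 3 above.  A miss for a 128K block may be satisfied by evicting
 * several smaller evictable blocks whose sizes sum to roughly 128K -- for
 * instance one 64K block plus two 32K blocks -- rather than a single
 * fixed-size page as in the original ARC model.
 */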
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78789Sahrens  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes; rather, they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock you
98789Sahrens  * must use mutex_tryenter() to avoid deadlock.  Also note that
99789Sahrens  * a regular (mru/mfu) state mutex must be held before the corresponding ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112789Sahrens  */
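
/*
 * Editorial sketch (not part of the original source): the tryenter rule
 * above as it is applied, simplified, in arc_evict() later in this file.
 * The fragment is illustrative only.
 *
 *	mutex_enter(&state->mtx);		(arc list lock)
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {
 *		(evict or move ab while holding both locks)
 *		mutex_exit(hash_lock);
 *	} else {
 *		skipped += 1;			(retry on a later pass)
 *	}
 *	mutex_exit(&state->mtx);
 */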
113789Sahrens 
114789Sahrens #include <sys/spa.h>
115789Sahrens #include <sys/zio.h>
116789Sahrens #include <sys/zfs_context.h>
117789Sahrens #include <sys/arc.h>
118789Sahrens #include <sys/refcount.h>
119789Sahrens #ifdef _KERNEL
120789Sahrens #include <sys/vmsystm.h>
121789Sahrens #include <vm/anon.h>
122789Sahrens #include <sys/fs/swapnode.h>
1231484Sek110237 #include <sys/dnlc.h>
124789Sahrens #endif
125789Sahrens #include <sys/callb.h>
126789Sahrens 
127789Sahrens static kmutex_t		arc_reclaim_thr_lock;
128789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
129789Sahrens static uint8_t		arc_thread_exit;
130789Sahrens 
1311484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1321484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1331484Sek110237 
134789Sahrens typedef enum arc_reclaim_strategy {
135789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
136789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
137789Sahrens } arc_reclaim_strategy_t;
138789Sahrens 
139789Sahrens /* number of seconds before growing cache again */
140789Sahrens static int		arc_grow_retry = 60;
141789Sahrens 
142789Sahrens static kmutex_t arc_reclaim_lock;
143789Sahrens static int arc_dead;
144789Sahrens 
145789Sahrens /*
146789Sahrens  * Note that buffers can be in one of 5 states:
147789Sahrens  *	ARC_anon	- anonymous (discussed below)
1481544Seschrock  *	ARC_mru		- recently used, currently cached
1491544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1501544Seschrock  *	ARC_mfu		- frequently used, currently cached
1511544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
152789Sahrens  * When there are no active references to a buffer, it is
153789Sahrens  * linked onto one of the lists in arc.  These are the
154789Sahrens  * only buffers that can be evicted or deleted.
155789Sahrens  *
156789Sahrens  * Anonymous buffers are buffers that are not associated with
157789Sahrens  * a DVA.  These are buffers that hold dirty block copies
158789Sahrens  * before they are written to stable storage.  By definition,
1591544Seschrock  * they are "ref'd" and are considered part of arc_mru
160789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1611544Seschrock  * as they are written and migrate onto the arc_mru list.
162789Sahrens  */
163789Sahrens 
164789Sahrens typedef struct arc_state {
165789Sahrens 	list_t	list;	/* linked list of evictable buffers in state */
166789Sahrens 	uint64_t lsize;	/* total size of buffers in the linked list */
167789Sahrens 	uint64_t size;	/* total size of all buffers in this state */
168789Sahrens 	uint64_t hits;
169789Sahrens 	kmutex_t mtx;
170789Sahrens } arc_state_t;
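
/*
 * Editorial note (not part of the original source): for each state,
 * lsize <= size.  lsize counts only the evictable buffers currently linked
 * on the state's list (see add_reference()/remove_reference() below), while
 * size counts all data charged to the state, including buffers that are
 * referenced and therefore off the list.
 */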
171789Sahrens 
172789Sahrens /* The 5 states: */
173789Sahrens static arc_state_t ARC_anon;
1741544Seschrock static arc_state_t ARC_mru;
1751544Seschrock static arc_state_t ARC_mru_ghost;
1761544Seschrock static arc_state_t ARC_mfu;
1771544Seschrock static arc_state_t ARC_mfu_ghost;
178789Sahrens 
179789Sahrens static struct arc {
180789Sahrens 	arc_state_t 	*anon;
1811544Seschrock 	arc_state_t	*mru;
1821544Seschrock 	arc_state_t	*mru_ghost;
1831544Seschrock 	arc_state_t	*mfu;
1841544Seschrock 	arc_state_t	*mfu_ghost;
185789Sahrens 	uint64_t	size;		/* Actual total arc size */
1861544Seschrock 	uint64_t	p;		/* Target size (in bytes) of mru */
187789Sahrens 	uint64_t	c;		/* Target size of cache (in bytes) */
188789Sahrens 	uint64_t	c_min;		/* Minimum target cache size */
189789Sahrens 	uint64_t	c_max;		/* Maximum target cache size */
190789Sahrens 
191789Sahrens 	/* performance stats */
192789Sahrens 	uint64_t	hits;
193789Sahrens 	uint64_t	misses;
194789Sahrens 	uint64_t	deleted;
195789Sahrens 	uint64_t	skipped;
196789Sahrens 	uint64_t	hash_elements;
197789Sahrens 	uint64_t	hash_elements_max;
198789Sahrens 	uint64_t	hash_collisions;
199789Sahrens 	uint64_t	hash_chains;
200789Sahrens 	uint32_t	hash_chain_max;
201789Sahrens 
202789Sahrens 	int		no_grow;	/* Don't try to grow cache size */
203789Sahrens } arc;
204789Sahrens 
205789Sahrens static uint64_t arc_tempreserve;
206789Sahrens 
207789Sahrens typedef struct arc_callback arc_callback_t;
208789Sahrens 
209789Sahrens struct arc_callback {
210789Sahrens 	arc_done_func_t		*acb_done;
211789Sahrens 	void			*acb_private;
212789Sahrens 	arc_byteswap_func_t	*acb_byteswap;
213789Sahrens 	arc_buf_t		*acb_buf;
214789Sahrens 	zio_t			*acb_zio_dummy;
215789Sahrens 	arc_callback_t		*acb_next;
216789Sahrens };
217789Sahrens 
218789Sahrens struct arc_buf_hdr {
219789Sahrens 	/* immutable */
220789Sahrens 	uint64_t		b_size;
221789Sahrens 	spa_t			*b_spa;
222789Sahrens 
223789Sahrens 	/* protected by hash lock */
224789Sahrens 	dva_t			b_dva;
225789Sahrens 	uint64_t		b_birth;
226789Sahrens 	uint64_t		b_cksum0;
227789Sahrens 
228789Sahrens 	arc_buf_hdr_t		*b_hash_next;
229789Sahrens 	arc_buf_t		*b_buf;
230789Sahrens 	uint32_t		b_flags;
2311544Seschrock 	uint32_t		b_datacnt;	/* arc_bufs with valid b_data */
232789Sahrens 
233789Sahrens 	kcondvar_t		b_cv;
234789Sahrens 	arc_callback_t		*b_acb;
235789Sahrens 
236789Sahrens 	/* protected by arc state mutex */
237789Sahrens 	arc_state_t		*b_state;
238789Sahrens 	list_node_t		b_arc_node;
239789Sahrens 
240789Sahrens 	/* updated atomically */
241789Sahrens 	clock_t			b_arc_access;
242789Sahrens 
243789Sahrens 	/* self protecting */
244789Sahrens 	refcount_t		b_refcnt;
245789Sahrens };
246789Sahrens 
2471544Seschrock static arc_buf_t *arc_eviction_list;
2481544Seschrock static kmutex_t arc_eviction_mtx;
2491544Seschrock static void arc_access_and_exit(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
2501544Seschrock 
2511544Seschrock #define	GHOST_STATE(state)	\
2521544Seschrock 	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)
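
/*
 * Editorial note (not part of the original source): a "ghost" state holds
 * only headers, never data.  Such headers have b_datacnt == 0 and
 * b_buf == NULL (see arc_change_state() below); they exist so that hits on
 * recently evicted blocks can steer the mru/mfu balance in arc_adapt().
 */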
2531544Seschrock 
254789Sahrens /*
255789Sahrens  * Private ARC flags.  These flags are ARC-internal flags that will show up
256789Sahrens  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
257789Sahrens  * be passed in as arc_flags in things like arc_read.  However, these flags
258789Sahrens  * should never be passed and should only be set by ARC code.  When adding new
259789Sahrens  * public flags, make sure not to smash the private ones.
260789Sahrens  */
261789Sahrens 
2621544Seschrock #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
263789Sahrens #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
264789Sahrens #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
265789Sahrens #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
2661544Seschrock #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
267789Sahrens 
2681544Seschrock #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
269789Sahrens #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
270789Sahrens #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
271789Sahrens #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
2721544Seschrock #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
273789Sahrens 
274789Sahrens /*
275789Sahrens  * Hash table routines
276789Sahrens  */
277789Sahrens 
278789Sahrens #define	HT_LOCK_PAD	64
279789Sahrens 
280789Sahrens struct ht_lock {
281789Sahrens 	kmutex_t	ht_lock;
282789Sahrens #ifdef _KERNEL
283789Sahrens 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
284789Sahrens #endif
285789Sahrens };
286789Sahrens 
287789Sahrens #define	BUF_LOCKS 256
288789Sahrens typedef struct buf_hash_table {
289789Sahrens 	uint64_t ht_mask;
290789Sahrens 	arc_buf_hdr_t **ht_table;
291789Sahrens 	struct ht_lock ht_locks[BUF_LOCKS];
292789Sahrens } buf_hash_table_t;
293789Sahrens 
294789Sahrens static buf_hash_table_t buf_hash_table;
295789Sahrens 
296789Sahrens #define	BUF_HASH_INDEX(spa, dva, birth) \
297789Sahrens 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
298789Sahrens #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
299789Sahrens #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
300789Sahrens #define	HDR_LOCK(buf) \
301789Sahrens 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
302789Sahrens 
303789Sahrens uint64_t zfs_crc64_table[256];
304789Sahrens 
305789Sahrens static uint64_t
306789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
307789Sahrens {
308789Sahrens 	uintptr_t spav = (uintptr_t)spa;
309789Sahrens 	uint8_t *vdva = (uint8_t *)dva;
310789Sahrens 	uint64_t crc = -1ULL;
311789Sahrens 	int i;
312789Sahrens 
313789Sahrens 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
314789Sahrens 
315789Sahrens 	for (i = 0; i < sizeof (dva_t); i++)
316789Sahrens 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
317789Sahrens 
318789Sahrens 	crc ^= (spav>>8) ^ birth;
319789Sahrens 
320789Sahrens 	return (crc);
321789Sahrens }
322789Sahrens 
323789Sahrens #define	BUF_EMPTY(buf)						\
324789Sahrens 	((buf)->b_dva.dva_word[0] == 0 &&			\
325789Sahrens 	(buf)->b_dva.dva_word[1] == 0 &&			\
326789Sahrens 	(buf)->b_birth == 0)
327789Sahrens 
328789Sahrens #define	BUF_EQUAL(spa, dva, birth, buf)				\
329789Sahrens 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
330789Sahrens 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
331789Sahrens 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
332789Sahrens 
333789Sahrens static arc_buf_hdr_t *
334789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
335789Sahrens {
336789Sahrens 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
337789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
338789Sahrens 	arc_buf_hdr_t *buf;
339789Sahrens 
340789Sahrens 	mutex_enter(hash_lock);
341789Sahrens 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
342789Sahrens 	    buf = buf->b_hash_next) {
343789Sahrens 		if (BUF_EQUAL(spa, dva, birth, buf)) {
344789Sahrens 			*lockp = hash_lock;
345789Sahrens 			return (buf);
346789Sahrens 		}
347789Sahrens 	}
348789Sahrens 	mutex_exit(hash_lock);
349789Sahrens 	*lockp = NULL;
350789Sahrens 	return (NULL);
351789Sahrens }
352789Sahrens 
353789Sahrens /*
354789Sahrens  * Insert an entry into the hash table.  If there is already an element
355789Sahrens  * equal to elem in the hash table, then the already existing element
356789Sahrens  * will be returned and the new element will not be inserted.
357789Sahrens  * Otherwise returns NULL.
358789Sahrens  */
359789Sahrens static arc_buf_hdr_t *fbufs[4]; /* XXX to find 6341326 */
360789Sahrens static kthread_t *fbufs_lastthread;
361789Sahrens static arc_buf_hdr_t *
362789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
363789Sahrens {
364789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
365789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
366789Sahrens 	arc_buf_hdr_t *fbuf;
367789Sahrens 	uint32_t max, i;
368789Sahrens 
3691544Seschrock 	ASSERT(!HDR_IN_HASH_TABLE(buf));
370789Sahrens 	fbufs_lastthread = curthread;
371789Sahrens 	*lockp = hash_lock;
372789Sahrens 	mutex_enter(hash_lock);
373789Sahrens 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
374789Sahrens 	    fbuf = fbuf->b_hash_next, i++) {
375789Sahrens 		if (i < sizeof (fbufs) / sizeof (fbufs[0]))
376789Sahrens 			fbufs[i] = fbuf;
377789Sahrens 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
378789Sahrens 			return (fbuf);
379789Sahrens 	}
380789Sahrens 
381789Sahrens 	buf->b_hash_next = buf_hash_table.ht_table[idx];
382789Sahrens 	buf_hash_table.ht_table[idx] = buf;
3831544Seschrock 	buf->b_flags |= ARC_IN_HASH_TABLE;
384789Sahrens 
385789Sahrens 	/* collect some hash table performance data */
386789Sahrens 	if (i > 0) {
387789Sahrens 		atomic_add_64(&arc.hash_collisions, 1);
388789Sahrens 		if (i == 1)
389789Sahrens 			atomic_add_64(&arc.hash_chains, 1);
390789Sahrens 	}
391789Sahrens 	while (i > (max = arc.hash_chain_max) &&
392789Sahrens 	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
393789Sahrens 		continue;
394789Sahrens 	}
395789Sahrens 	atomic_add_64(&arc.hash_elements, 1);
396789Sahrens 	if (arc.hash_elements > arc.hash_elements_max)
397789Sahrens 		atomic_add_64(&arc.hash_elements_max, 1);
398789Sahrens 
399789Sahrens 	return (NULL);
400789Sahrens }
401789Sahrens 
402789Sahrens static void
403789Sahrens buf_hash_remove(arc_buf_hdr_t *buf)
404789Sahrens {
405789Sahrens 	arc_buf_hdr_t *fbuf, **bufp;
406789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
407789Sahrens 
408789Sahrens 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
4091544Seschrock 	ASSERT(HDR_IN_HASH_TABLE(buf));
410789Sahrens 
411789Sahrens 	bufp = &buf_hash_table.ht_table[idx];
412789Sahrens 	while ((fbuf = *bufp) != buf) {
413789Sahrens 		ASSERT(fbuf != NULL);
414789Sahrens 		bufp = &fbuf->b_hash_next;
415789Sahrens 	}
416789Sahrens 	*bufp = buf->b_hash_next;
417789Sahrens 	buf->b_hash_next = NULL;
4181544Seschrock 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
419789Sahrens 
420789Sahrens 	/* collect some hash table performance data */
421789Sahrens 	atomic_add_64(&arc.hash_elements, -1);
422789Sahrens 	if (buf_hash_table.ht_table[idx] &&
423789Sahrens 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
424789Sahrens 		atomic_add_64(&arc.hash_chains, -1);
425789Sahrens }
426789Sahrens 
427789Sahrens /*
428789Sahrens  * Global data structures and functions for the buf kmem cache.
429789Sahrens  */
430789Sahrens static kmem_cache_t *hdr_cache;
431789Sahrens static kmem_cache_t *buf_cache;
432789Sahrens 
433789Sahrens static void
434789Sahrens buf_fini(void)
435789Sahrens {
436789Sahrens 	int i;
437789Sahrens 
438789Sahrens 	kmem_free(buf_hash_table.ht_table,
439789Sahrens 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
440789Sahrens 	for (i = 0; i < BUF_LOCKS; i++)
441789Sahrens 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
442789Sahrens 	kmem_cache_destroy(hdr_cache);
443789Sahrens 	kmem_cache_destroy(buf_cache);
444789Sahrens }
445789Sahrens 
446789Sahrens /*
447789Sahrens  * Constructor callback - called when the cache is empty
448789Sahrens  * and a new buf is requested.
449789Sahrens  */
450789Sahrens /* ARGSUSED */
451789Sahrens static int
452789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag)
453789Sahrens {
454789Sahrens 	arc_buf_hdr_t *buf = vbuf;
455789Sahrens 
456789Sahrens 	bzero(buf, sizeof (arc_buf_hdr_t));
457789Sahrens 	refcount_create(&buf->b_refcnt);
458789Sahrens 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
459789Sahrens 	return (0);
460789Sahrens }
461789Sahrens 
462789Sahrens /*
463789Sahrens  * Destructor callback - called when a cached buf is
464789Sahrens  * no longer required.
465789Sahrens  */
466789Sahrens /* ARGSUSED */
467789Sahrens static void
468789Sahrens hdr_dest(void *vbuf, void *unused)
469789Sahrens {
470789Sahrens 	arc_buf_hdr_t *buf = vbuf;
471789Sahrens 
472789Sahrens 	refcount_destroy(&buf->b_refcnt);
473789Sahrens 	cv_destroy(&buf->b_cv);
474789Sahrens }
475789Sahrens 
4761544Seschrock static int arc_reclaim_needed(void);
477789Sahrens void arc_kmem_reclaim(void);
478789Sahrens 
479789Sahrens /*
480789Sahrens  * Reclaim callback -- invoked when memory is low.
481789Sahrens  */
482789Sahrens /* ARGSUSED */
483789Sahrens static void
484789Sahrens hdr_recl(void *unused)
485789Sahrens {
486789Sahrens 	dprintf("hdr_recl called\n");
4871544Seschrock 	if (arc_reclaim_needed())
4881544Seschrock 		arc_kmem_reclaim();
489789Sahrens }
490789Sahrens 
491789Sahrens static void
492789Sahrens buf_init(void)
493789Sahrens {
494789Sahrens 	uint64_t *ct;
4951544Seschrock 	uint64_t hsize = 1ULL << 12;
496789Sahrens 	int i, j;
497789Sahrens 
498789Sahrens 	/*
499789Sahrens 	 * The hash table is big enough to fill all of physical memory
5001544Seschrock 	 * with an average 64K block size.  The table will take up
5011544Seschrock 	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
502789Sahrens 	 */
5031544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
504789Sahrens 		hsize <<= 1;
5051544Seschrock retry:
506789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
5071544Seschrock 	buf_hash_table.ht_table =
5081544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
5091544Seschrock 	if (buf_hash_table.ht_table == NULL) {
5101544Seschrock 		ASSERT(hsize > (1ULL << 8));
5111544Seschrock 		hsize >>= 1;
5121544Seschrock 		goto retry;
5131544Seschrock 	}
514789Sahrens 
515789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
516789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
517789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
518789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
519789Sahrens 
520789Sahrens 	for (i = 0; i < 256; i++)
521789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
522789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
523789Sahrens 
524789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
525789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
526789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
527789Sahrens 	}
528789Sahrens }
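
/*
 * Editorial note (not part of the original source): a worked example of the
 * sizing loop above.  With 64GB of physical memory (physmem * PAGESIZE),
 * hsize starts at 2^12 and doubles until hsize * 65536 >= 64GB, giving
 * hsize = 2^20 entries.  With 8-byte pointers the table then occupies 8MB,
 * which matches the 128KB-per-GB figure in the comment above.
 */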
529789Sahrens 
530789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
531789Sahrens 
532789Sahrens static void
533789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
534789Sahrens {
535789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
536789Sahrens 
537789Sahrens 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
538789Sahrens 	    (ab->b_state != arc.anon)) {
5391544Seschrock 		int delta = ab->b_size * ab->b_datacnt;
540789Sahrens 
541789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
542789Sahrens 		mutex_enter(&ab->b_state->mtx);
5431544Seschrock 		ASSERT(refcount_count(&ab->b_refcnt) > 0);
544789Sahrens 		ASSERT(list_link_active(&ab->b_arc_node));
545789Sahrens 		list_remove(&ab->b_state->list, ab);
5461544Seschrock 		if (GHOST_STATE(ab->b_state)) {
5471544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 0);
5481544Seschrock 			ASSERT3P(ab->b_buf, ==, NULL);
5491544Seschrock 			delta = ab->b_size;
5501544Seschrock 		}
5511544Seschrock 		ASSERT(delta > 0);
5521544Seschrock 		ASSERT3U(ab->b_state->lsize, >=, delta);
5531544Seschrock 		atomic_add_64(&ab->b_state->lsize, -delta);
554789Sahrens 		mutex_exit(&ab->b_state->mtx);
555789Sahrens 	}
556789Sahrens }
557789Sahrens 
558789Sahrens static int
559789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
560789Sahrens {
561789Sahrens 	int cnt;
562789Sahrens 
5631544Seschrock 	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
5641544Seschrock 	ASSERT(!GHOST_STATE(ab->b_state));
565789Sahrens 
566789Sahrens 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
567789Sahrens 	    (ab->b_state != arc.anon)) {
568789Sahrens 
569789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
570789Sahrens 		mutex_enter(&ab->b_state->mtx);
571789Sahrens 		ASSERT(!list_link_active(&ab->b_arc_node));
572789Sahrens 		list_insert_head(&ab->b_state->list, ab);
5731544Seschrock 		ASSERT(ab->b_datacnt > 0);
5741544Seschrock 		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
5751544Seschrock 		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
576789Sahrens 		mutex_exit(&ab->b_state->mtx);
577789Sahrens 	}
578789Sahrens 	return (cnt);
579789Sahrens }
580789Sahrens 
581789Sahrens /*
582789Sahrens  * Move the supplied buffer to the indicated state.  The mutex
583789Sahrens  * for the buffer must be held by the caller.
584789Sahrens  */
585789Sahrens static void
5861544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
587789Sahrens {
5881544Seschrock 	arc_state_t *old_state = ab->b_state;
5891544Seschrock 	int refcnt = refcount_count(&ab->b_refcnt);
5901544Seschrock 	int from_delta, to_delta;
591789Sahrens 
592789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
5931544Seschrock 	ASSERT(new_state != old_state);
5941544Seschrock 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
5951544Seschrock 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
5961544Seschrock 
5971544Seschrock 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
598789Sahrens 
599789Sahrens 	/*
600789Sahrens 	 * If this buffer is evictable, transfer it from the
601789Sahrens 	 * old state list to the new state list.
602789Sahrens 	 */
6031544Seschrock 	if (refcnt == 0) {
6041544Seschrock 		if (old_state != arc.anon) {
6051544Seschrock 			int use_mutex = !MUTEX_HELD(&old_state->mtx);
6061544Seschrock 
6071544Seschrock 			if (use_mutex)
6081544Seschrock 				mutex_enter(&old_state->mtx);
6091544Seschrock 
6101544Seschrock 			ASSERT(list_link_active(&ab->b_arc_node));
6111544Seschrock 			list_remove(&old_state->list, ab);
612789Sahrens 
6131544Seschrock 			/* ghost elements have a ghost size */
6141544Seschrock 			if (GHOST_STATE(old_state)) {
6151544Seschrock 				ASSERT(ab->b_datacnt == 0);
6161544Seschrock 				ASSERT(ab->b_buf == NULL);
6171544Seschrock 				from_delta = ab->b_size;
618789Sahrens 			}
6191544Seschrock 			ASSERT3U(old_state->lsize, >=, from_delta);
6201544Seschrock 			atomic_add_64(&old_state->lsize, -from_delta);
6211544Seschrock 
6221544Seschrock 			if (use_mutex)
6231544Seschrock 				mutex_exit(&old_state->mtx);
624789Sahrens 		}
625789Sahrens 		if (new_state != arc.anon) {
6261544Seschrock 			int use_mutex = !MUTEX_HELD(&new_state->mtx);
627789Sahrens 
6281544Seschrock 			if (use_mutex)
629789Sahrens 				mutex_enter(&new_state->mtx);
6301544Seschrock 
631789Sahrens 			list_insert_head(&new_state->list, ab);
6321544Seschrock 
6331544Seschrock 			/* ghost elements have a ghost size */
6341544Seschrock 			if (GHOST_STATE(new_state)) {
6351544Seschrock 				ASSERT(ab->b_datacnt == 0);
6361544Seschrock 				ASSERT(ab->b_buf == NULL);
6371544Seschrock 				to_delta = ab->b_size;
6381544Seschrock 			}
6391544Seschrock 			atomic_add_64(&new_state->lsize, to_delta);
6401544Seschrock 			ASSERT3U(new_state->size + to_delta, >=,
6411544Seschrock 			    new_state->lsize);
6421544Seschrock 
6431544Seschrock 			if (use_mutex)
644789Sahrens 				mutex_exit(&new_state->mtx);
645789Sahrens 		}
646789Sahrens 	}
647789Sahrens 
648789Sahrens 	ASSERT(!BUF_EMPTY(ab));
6491544Seschrock 	if (new_state == arc.anon && old_state != arc.anon) {
650789Sahrens 		buf_hash_remove(ab);
651789Sahrens 	}
652789Sahrens 
653789Sahrens 	/*
654789Sahrens 	 * If this buffer isn't being transferred to the MRU-top
655789Sahrens 	 * state, it's safe to clear its prefetch flag
656789Sahrens 	 */
6571544Seschrock 	if ((new_state != arc.mru) && (new_state != arc.mru_ghost)) {
658789Sahrens 		ab->b_flags &= ~ARC_PREFETCH;
659789Sahrens 	}
660789Sahrens 
6611544Seschrock 	/* adjust state sizes */
6621544Seschrock 	if (to_delta)
6631544Seschrock 		atomic_add_64(&new_state->size, to_delta);
6641544Seschrock 	if (from_delta) {
6651544Seschrock 		ASSERT3U(old_state->size, >=, from_delta);
6661544Seschrock 		atomic_add_64(&old_state->size, -from_delta);
667789Sahrens 	}
668789Sahrens 	ab->b_state = new_state;
669789Sahrens }
670789Sahrens 
671789Sahrens arc_buf_t *
672789Sahrens arc_buf_alloc(spa_t *spa, int size, void *tag)
673789Sahrens {
674789Sahrens 	arc_buf_hdr_t *hdr;
675789Sahrens 	arc_buf_t *buf;
676789Sahrens 
677789Sahrens 	ASSERT3U(size, >, 0);
678789Sahrens 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
679789Sahrens 	ASSERT(BUF_EMPTY(hdr));
680789Sahrens 	hdr->b_size = size;
681789Sahrens 	hdr->b_spa = spa;
682789Sahrens 	hdr->b_state = arc.anon;
683789Sahrens 	hdr->b_arc_access = 0;
684789Sahrens 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
685789Sahrens 	buf->b_hdr = hdr;
6861544Seschrock 	buf->b_efunc = NULL;
6871544Seschrock 	buf->b_private = NULL;
688789Sahrens 	buf->b_next = NULL;
689789Sahrens 	buf->b_data = zio_buf_alloc(size);
690789Sahrens 	hdr->b_buf = buf;
6911544Seschrock 	hdr->b_datacnt = 1;
692789Sahrens 	hdr->b_flags = 0;
693789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
694789Sahrens 	(void) refcount_add(&hdr->b_refcnt, tag);
695789Sahrens 
696789Sahrens 	atomic_add_64(&arc.size, size);
697789Sahrens 	atomic_add_64(&arc.anon->size, size);
698789Sahrens 
699789Sahrens 	return (buf);
700789Sahrens }
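
/*
 * Editorial usage sketch (hypothetical caller, not part of the original
 * source): an anonymous buffer is allocated against a tag, filled through
 * b_data, and released with the same tag.  "my_data" and "my_tag" are
 * illustrative names only.
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, 512, my_tag);
 *	bcopy(my_data, buf->b_data, arc_buf_size(buf));
 *	arc_buf_free(buf, my_tag);
 */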
701789Sahrens 
7021544Seschrock static void *
7031544Seschrock arc_data_copy(arc_buf_hdr_t *hdr, void *old_data)
7041544Seschrock {
7051544Seschrock 	void *new_data = zio_buf_alloc(hdr->b_size);
7061544Seschrock 
7071544Seschrock 	atomic_add_64(&arc.size, hdr->b_size);
7081544Seschrock 	bcopy(old_data, new_data, hdr->b_size);
7091544Seschrock 	atomic_add_64(&hdr->b_state->size, hdr->b_size);
7101544Seschrock 	if (list_link_active(&hdr->b_arc_node)) {
7111544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
7121544Seschrock 		atomic_add_64(&hdr->b_state->lsize, hdr->b_size);
7131544Seschrock 	}
7141544Seschrock 	return (new_data);
7151544Seschrock }
7161544Seschrock 
7171544Seschrock void
7181544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag)
7191544Seschrock {
7201544Seschrock 	arc_buf_hdr_t *hdr;
7211544Seschrock 	kmutex_t *hash_lock;
7221544Seschrock 
7231544Seschrock 	mutex_enter(&arc_eviction_mtx);
7241544Seschrock 	hdr = buf->b_hdr;
7251544Seschrock 	if (buf->b_data == NULL) {
7261544Seschrock 		/*
7271544Seschrock 		 * This buffer is evicted.
7281544Seschrock 		 */
7291544Seschrock 		mutex_exit(&arc_eviction_mtx);
7301544Seschrock 		return;
7311544Seschrock 	} else {
7321544Seschrock 		/*
7331544Seschrock 		 * Prevent this buffer from being evicted
7341544Seschrock 		 * while we add a reference.
7351544Seschrock 		 */
7361544Seschrock 		buf->b_hdr = NULL;
7371544Seschrock 	}
7381544Seschrock 	mutex_exit(&arc_eviction_mtx);
7391544Seschrock 
7401544Seschrock 	ASSERT(hdr->b_state != arc.anon);
7411544Seschrock 	hash_lock = HDR_LOCK(hdr);
7421544Seschrock 	mutex_enter(hash_lock);
7431544Seschrock 	ASSERT(!GHOST_STATE(hdr->b_state));
7441544Seschrock 	buf->b_hdr = hdr;
7451544Seschrock 	add_reference(hdr, hash_lock, tag);
7461544Seschrock 	arc_access_and_exit(hdr, hash_lock);
7471544Seschrock 	atomic_add_64(&arc.hits, 1);
7481544Seschrock }
7491544Seschrock 
750789Sahrens static void
7511544Seschrock arc_buf_destroy(arc_buf_t *buf, boolean_t all)
7521544Seschrock {
7531544Seschrock 	arc_buf_t **bufp;
7541544Seschrock 
7551544Seschrock 	/* free up data associated with the buf */
7561544Seschrock 	if (buf->b_data) {
7571544Seschrock 		arc_state_t *state = buf->b_hdr->b_state;
7581544Seschrock 		uint64_t size = buf->b_hdr->b_size;
7591544Seschrock 
7601544Seschrock 		zio_buf_free(buf->b_data, size);
7611544Seschrock 		atomic_add_64(&arc.size, -size);
7621544Seschrock 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
7631544Seschrock 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
7641544Seschrock 			ASSERT(state != arc.anon);
7651544Seschrock 			ASSERT3U(state->lsize, >=, size);
7661544Seschrock 			atomic_add_64(&state->lsize, -size);
7671544Seschrock 		}
7681544Seschrock 		ASSERT3U(state->size, >=, size);
7691544Seschrock 		atomic_add_64(&state->size, -size);
7701544Seschrock 		buf->b_data = NULL;
7711544Seschrock 		ASSERT(buf->b_hdr->b_datacnt > 0);
7721544Seschrock 		buf->b_hdr->b_datacnt -= 1;
7731544Seschrock 	}
7741544Seschrock 
7751544Seschrock 	/* only remove the buf if requested */
7761544Seschrock 	if (!all)
7771544Seschrock 		return;
7781544Seschrock 
7791544Seschrock 	/* remove the buf from the hdr list */
7801544Seschrock 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
7811544Seschrock 		continue;
7821544Seschrock 	*bufp = buf->b_next;
7831544Seschrock 
7841544Seschrock 	ASSERT(buf->b_efunc == NULL);
7851544Seschrock 
7861544Seschrock 	/* clean up the buf */
7871544Seschrock 	buf->b_hdr = NULL;
7881544Seschrock 	kmem_cache_free(buf_cache, buf);
7891544Seschrock }
7901544Seschrock 
7911544Seschrock static void
7921544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr)
793789Sahrens {
794789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
795789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
7961544Seschrock 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
797789Sahrens 
798789Sahrens 	if (!BUF_EMPTY(hdr)) {
7991544Seschrock 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
800789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
801789Sahrens 		hdr->b_birth = 0;
802789Sahrens 		hdr->b_cksum0 = 0;
803789Sahrens 	}
8041544Seschrock 	while (hdr->b_buf) {
805789Sahrens 		arc_buf_t *buf = hdr->b_buf;
806789Sahrens 
8071544Seschrock 		if (buf->b_efunc) {
8081544Seschrock 			mutex_enter(&arc_eviction_mtx);
8091544Seschrock 			ASSERT(buf->b_hdr != NULL);
8101544Seschrock 			arc_buf_destroy(hdr->b_buf, FALSE);
8111544Seschrock 			hdr->b_buf = buf->b_next;
8121544Seschrock 			buf->b_next = arc_eviction_list;
8131544Seschrock 			arc_eviction_list = buf;
8141544Seschrock 			mutex_exit(&arc_eviction_mtx);
8151544Seschrock 		} else {
8161544Seschrock 			arc_buf_destroy(hdr->b_buf, TRUE);
8171544Seschrock 		}
818789Sahrens 	}
8191544Seschrock 
820789Sahrens 	ASSERT(!list_link_active(&hdr->b_arc_node));
821789Sahrens 	ASSERT3P(hdr->b_hash_next, ==, NULL);
822789Sahrens 	ASSERT3P(hdr->b_acb, ==, NULL);
823789Sahrens 	kmem_cache_free(hdr_cache, hdr);
824789Sahrens }
825789Sahrens 
826789Sahrens void
827789Sahrens arc_buf_free(arc_buf_t *buf, void *tag)
828789Sahrens {
829789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
8301544Seschrock 	int hashed = hdr->b_state != arc.anon;
8311544Seschrock 
8321544Seschrock 	ASSERT(buf->b_efunc == NULL);
8331544Seschrock 	ASSERT(buf->b_data != NULL);
8341544Seschrock 
8351544Seschrock 	if (hashed) {
8361544Seschrock 		kmutex_t *hash_lock = HDR_LOCK(hdr);
8371544Seschrock 
8381544Seschrock 		mutex_enter(hash_lock);
8391544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
8401544Seschrock 		if (hdr->b_datacnt > 1)
8411544Seschrock 			arc_buf_destroy(buf, TRUE);
8421544Seschrock 		else
8431544Seschrock 			hdr->b_flags |= ARC_BUF_AVAILABLE;
8441544Seschrock 		mutex_exit(hash_lock);
8451544Seschrock 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
8461544Seschrock 		int destroy_hdr;
8471544Seschrock 		/*
8481544Seschrock 		 * We are in the middle of an async write.  Don't destroy
8491544Seschrock 		 * this buffer unless the write completes before we finish
8501544Seschrock 		 * decrementing the reference count.
8511544Seschrock 		 */
8521544Seschrock 		mutex_enter(&arc_eviction_mtx);
8531544Seschrock 		(void) remove_reference(hdr, NULL, tag);
8541544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
8551544Seschrock 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
8561544Seschrock 		mutex_exit(&arc_eviction_mtx);
8571544Seschrock 		if (destroy_hdr)
8581544Seschrock 			arc_hdr_destroy(hdr);
8591544Seschrock 	} else {
8601544Seschrock 		if (remove_reference(hdr, NULL, tag) > 0) {
8611544Seschrock 			ASSERT(HDR_IO_ERROR(hdr));
8621544Seschrock 			arc_buf_destroy(buf, TRUE);
8631544Seschrock 		} else {
8641544Seschrock 			arc_hdr_destroy(hdr);
8651544Seschrock 		}
8661544Seschrock 	}
8671544Seschrock }
8681544Seschrock 
8691544Seschrock int
8701544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag)
8711544Seschrock {
8721544Seschrock 	arc_buf_hdr_t *hdr = buf->b_hdr;
873789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
8741544Seschrock 	int no_callback = (buf->b_efunc == NULL);
8751544Seschrock 
8761544Seschrock 	if (hdr->b_state == arc.anon) {
8771544Seschrock 		arc_buf_free(buf, tag);
8781544Seschrock 		return (no_callback);
8791544Seschrock 	}
880789Sahrens 
881789Sahrens 	mutex_enter(hash_lock);
8821544Seschrock 	ASSERT(hdr->b_state != arc.anon);
8831544Seschrock 	ASSERT(buf->b_data != NULL);
884789Sahrens 
8851544Seschrock 	(void) remove_reference(hdr, hash_lock, tag);
8861544Seschrock 	if (hdr->b_datacnt > 1) {
8871544Seschrock 		if (no_callback)
8881544Seschrock 			arc_buf_destroy(buf, TRUE);
8891544Seschrock 	} else if (no_callback) {
8901544Seschrock 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
8911544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
892789Sahrens 	}
8931544Seschrock 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
8941544Seschrock 	    refcount_is_zero(&hdr->b_refcnt));
895789Sahrens 	mutex_exit(hash_lock);
8961544Seschrock 	return (no_callback);
897789Sahrens }
898789Sahrens 
899789Sahrens int
900789Sahrens arc_buf_size(arc_buf_t *buf)
901789Sahrens {
902789Sahrens 	return (buf->b_hdr->b_size);
903789Sahrens }
904789Sahrens 
905789Sahrens /*
906789Sahrens  * Evict buffers from list until we've removed the specified number of
907789Sahrens  * bytes.  Move the removed buffers to the appropriate evict state.
908789Sahrens  */
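/*
 * Editorial note (not part of the original source): a negative 'bytes'
 * argument means "evict everything evictable"; in that case the return
 * value is the number of buffers skipped rather than the number of bytes
 * evicted (see the use in arc_flush()).
 */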
909789Sahrens static uint64_t
9101544Seschrock arc_evict(arc_state_t *state, int64_t bytes)
911789Sahrens {
912789Sahrens 	arc_state_t *evicted_state;
9131544Seschrock 	uint64_t bytes_evicted = 0, skipped = 0;
914789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
915789Sahrens 	kmutex_t *hash_lock;
916789Sahrens 
9171544Seschrock 	ASSERT(state == arc.mru || state == arc.mfu);
918789Sahrens 
9191544Seschrock 	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
920789Sahrens 
921789Sahrens 	mutex_enter(&state->mtx);
922789Sahrens 	mutex_enter(&evicted_state->mtx);
923789Sahrens 
924789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
925789Sahrens 		ab_prev = list_prev(&state->list, ab);
926789Sahrens 		hash_lock = HDR_LOCK(ab);
927789Sahrens 		if (mutex_tryenter(hash_lock)) {
928789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
9291544Seschrock 			ASSERT(ab->b_datacnt > 0);
9301544Seschrock 			while (ab->b_buf) {
9311544Seschrock 				arc_buf_t *buf = ab->b_buf;
9321544Seschrock 				if (buf->b_data)
9331544Seschrock 					bytes_evicted += ab->b_size;
9341544Seschrock 				if (buf->b_efunc) {
9351544Seschrock 					mutex_enter(&arc_eviction_mtx);
9361544Seschrock 					/*
9371544Seschrock 					 * arc_buf_add_ref() could derail
9381544Seschrock 					 * this eviction.
9391544Seschrock 					 */
9401544Seschrock 					if (buf->b_hdr == NULL) {
9411544Seschrock 						mutex_exit(&arc_eviction_mtx);
9421544Seschrock 						mutex_exit(hash_lock);
9431544Seschrock 						goto skip;
9441544Seschrock 					}
9451544Seschrock 					arc_buf_destroy(buf, FALSE);
9461544Seschrock 					ab->b_buf = buf->b_next;
9471544Seschrock 					buf->b_next = arc_eviction_list;
9481544Seschrock 					arc_eviction_list = buf;
9491544Seschrock 					mutex_exit(&arc_eviction_mtx);
9501544Seschrock 				} else {
9511544Seschrock 					arc_buf_destroy(buf, TRUE);
9521544Seschrock 				}
9531544Seschrock 			}
9541544Seschrock 			ASSERT(ab->b_datacnt == 0);
955789Sahrens 			arc_change_state(evicted_state, ab, hash_lock);
9561544Seschrock 			ASSERT(HDR_IN_HASH_TABLE(ab));
9571544Seschrock 			ab->b_flags = ARC_IN_HASH_TABLE;
958789Sahrens 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
959789Sahrens 			mutex_exit(hash_lock);
9601544Seschrock 			if (bytes >= 0 && bytes_evicted >= bytes)
961789Sahrens 				break;
962789Sahrens 		} else {
9631544Seschrock skip:
9641544Seschrock 			skipped += 1;
965789Sahrens 		}
966789Sahrens 	}
967789Sahrens 	mutex_exit(&evicted_state->mtx);
968789Sahrens 	mutex_exit(&state->mtx);
969789Sahrens 
970789Sahrens 	if (bytes_evicted < bytes)
971789Sahrens 		dprintf("only evicted %lld bytes from %p",
972789Sahrens 		    (longlong_t)bytes_evicted, state);
973789Sahrens 
9741544Seschrock 	atomic_add_64(&arc.skipped, skipped);
9751544Seschrock 	if (bytes < 0)
9761544Seschrock 		return (skipped);
977789Sahrens 	return (bytes_evicted);
978789Sahrens }
979789Sahrens 
980789Sahrens /*
981789Sahrens  * Remove buffers from list until we've removed the specified number of
982789Sahrens  * bytes.  Destroy the buffers that are removed.
983789Sahrens  */
984789Sahrens static void
9851544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes)
986789Sahrens {
987789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
988789Sahrens 	kmutex_t *hash_lock;
9891544Seschrock 	uint64_t bytes_deleted = 0;
9901544Seschrock 	uint_t bufs_skipped = 0;
991789Sahrens 
9921544Seschrock 	ASSERT(GHOST_STATE(state));
993789Sahrens top:
994789Sahrens 	mutex_enter(&state->mtx);
995789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
996789Sahrens 		ab_prev = list_prev(&state->list, ab);
997789Sahrens 		hash_lock = HDR_LOCK(ab);
998789Sahrens 		if (mutex_tryenter(hash_lock)) {
9991544Seschrock 			ASSERT(ab->b_buf == NULL);
1000789Sahrens 			arc_change_state(arc.anon, ab, hash_lock);
1001789Sahrens 			mutex_exit(hash_lock);
1002789Sahrens 			atomic_add_64(&arc.deleted, 1);
10031544Seschrock 			bytes_deleted += ab->b_size;
10041544Seschrock 			arc_hdr_destroy(ab);
1005789Sahrens 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1006789Sahrens 			if (bytes >= 0 && bytes_deleted >= bytes)
1007789Sahrens 				break;
1008789Sahrens 		} else {
1009789Sahrens 			if (bytes < 0) {
1010789Sahrens 				mutex_exit(&state->mtx);
1011789Sahrens 				mutex_enter(hash_lock);
1012789Sahrens 				mutex_exit(hash_lock);
1013789Sahrens 				goto top;
1014789Sahrens 			}
1015789Sahrens 			bufs_skipped += 1;
1016789Sahrens 		}
1017789Sahrens 	}
1018789Sahrens 	mutex_exit(&state->mtx);
1019789Sahrens 
1020789Sahrens 	if (bufs_skipped) {
1021789Sahrens 		atomic_add_64(&arc.skipped, bufs_skipped);
1022789Sahrens 		ASSERT(bytes >= 0);
1023789Sahrens 	}
1024789Sahrens 
1025789Sahrens 	if (bytes_deleted < bytes)
1026789Sahrens 		dprintf("only deleted %lld bytes from %p",
1027789Sahrens 		    (longlong_t)bytes_deleted, state);
1028789Sahrens }
1029789Sahrens 
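/*
 * Editorial summary (not part of the original source) of the invariants
 * arc_adjust() below works toward:
 *	- anon + mru resident data is trimmed toward arc.p,
 *	- anon + mru + mru_ghost data is kept under arc.c,
 *	- total resident data (arc.size) is kept under arc.c by evicting
 *	  from mfu, and
 *	- resident plus ghost data is kept under 2 * arc.c by deleting
 *	  from mfu_ghost.
 */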
1030789Sahrens static void
1031789Sahrens arc_adjust(void)
1032789Sahrens {
1033789Sahrens 	int64_t top_sz, mru_over, arc_over;
1034789Sahrens 
10351544Seschrock 	top_sz = arc.anon->size + arc.mru->size;
1036789Sahrens 
10371544Seschrock 	if (top_sz > arc.p && arc.mru->lsize > 0) {
10381544Seschrock 		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
10391544Seschrock 		(void) arc_evict(arc.mru, toevict);
10401544Seschrock 		top_sz = arc.anon->size + arc.mru->size;
1041789Sahrens 	}
1042789Sahrens 
10431544Seschrock 	mru_over = top_sz + arc.mru_ghost->size - arc.c;
1044789Sahrens 
1045789Sahrens 	if (mru_over > 0) {
10461544Seschrock 		if (arc.mru_ghost->lsize > 0) {
10471544Seschrock 			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
10481544Seschrock 			arc_evict_ghost(arc.mru_ghost, todelete);
1049789Sahrens 		}
1050789Sahrens 	}
1051789Sahrens 
1052789Sahrens 	if ((arc_over = arc.size - arc.c) > 0) {
10531544Seschrock 		int64_t tbl_over;
1054789Sahrens 
10551544Seschrock 		if (arc.mfu->lsize > 0) {
10561544Seschrock 			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
10571544Seschrock 			(void) arc_evict(arc.mfu, toevict);
1058789Sahrens 		}
1059789Sahrens 
10601544Seschrock 		tbl_over = arc.size + arc.mru_ghost->lsize +
10611544Seschrock 		    arc.mfu_ghost->lsize - arc.c*2;
1062789Sahrens 
10631544Seschrock 		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
10641544Seschrock 			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
10651544Seschrock 			arc_evict_ghost(arc.mfu_ghost, todelete);
1066789Sahrens 		}
1067789Sahrens 	}
1068789Sahrens }
1069789Sahrens 
10701544Seschrock static void
10711544Seschrock arc_do_user_evicts(void)
10721544Seschrock {
10731544Seschrock 	mutex_enter(&arc_eviction_mtx);
10741544Seschrock 	while (arc_eviction_list != NULL) {
10751544Seschrock 		arc_buf_t *buf = arc_eviction_list;
10761544Seschrock 		arc_eviction_list = buf->b_next;
10771544Seschrock 		buf->b_hdr = NULL;
10781544Seschrock 		mutex_exit(&arc_eviction_mtx);
10791544Seschrock 
10801819Smaybee 		if (buf->b_efunc != NULL)
10811819Smaybee 			VERIFY(buf->b_efunc(buf) == 0);
10821544Seschrock 
10831544Seschrock 		buf->b_efunc = NULL;
10841544Seschrock 		buf->b_private = NULL;
10851544Seschrock 		kmem_cache_free(buf_cache, buf);
10861544Seschrock 		mutex_enter(&arc_eviction_mtx);
10871544Seschrock 	}
10881544Seschrock 	mutex_exit(&arc_eviction_mtx);
10891544Seschrock }
10901544Seschrock 
1091789Sahrens /*
1092789Sahrens  * Flush all *evictable* data from the cache.
1093789Sahrens  * NOTE: this will not touch "active" (i.e. referenced) data.
1094789Sahrens  */
1095789Sahrens void
1096789Sahrens arc_flush(void)
1097789Sahrens {
10981544Seschrock 	while (arc_evict(arc.mru, -1));
10991544Seschrock 	while (arc_evict(arc.mfu, -1));
1100789Sahrens 
11011544Seschrock 	arc_evict_ghost(arc.mru_ghost, -1);
11021544Seschrock 	arc_evict_ghost(arc.mfu_ghost, -1);
11031544Seschrock 
11041544Seschrock 	mutex_enter(&arc_reclaim_thr_lock);
11051544Seschrock 	arc_do_user_evicts();
11061544Seschrock 	mutex_exit(&arc_reclaim_thr_lock);
11071544Seschrock 	ASSERT(arc_eviction_list == NULL);
1108789Sahrens }
1109789Sahrens 
1110789Sahrens void
1111789Sahrens arc_kmem_reclaim(void)
1112789Sahrens {
11131544Seschrock 	/* Remove 12.5% */
1114789Sahrens 	/*
1115789Sahrens 	 * We need arc_reclaim_lock because we don't want multiple
1116789Sahrens 	 * threads trying to reclaim concurrently.
1117789Sahrens 	 */
1118789Sahrens 
1119789Sahrens 	/*
1120789Sahrens 	 * umem calls the reclaim func when we destroy the buf cache,
1121789Sahrens 	 * which is after we do arc_fini().  So we set a flag to prevent
1122789Sahrens 	 * accessing the destroyed mutexes and lists.
1123789Sahrens 	 */
1124789Sahrens 	if (arc_dead)
1125789Sahrens 		return;
1126789Sahrens 
11271544Seschrock 	if (arc.c <= arc.c_min)
11281544Seschrock 		return;
11291544Seschrock 
1130789Sahrens 	mutex_enter(&arc_reclaim_lock);
1131789Sahrens 
11321544Seschrock 	atomic_add_64(&arc.c, -(arc.c >> 3));
11331544Seschrock 	atomic_add_64(&arc.p, -(arc.p >> 3));
11341544Seschrock 	if (arc.c > arc.size)
11351544Seschrock 		arc.c = arc.size;
1136789Sahrens 	if (arc.c < arc.c_min)
1137789Sahrens 		arc.c = arc.c_min;
11381544Seschrock 	if (arc.p > arc.c)
11391544Seschrock 		arc.p = (arc.c >> 1);
11401544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1141789Sahrens 
1142789Sahrens 	arc_adjust();
1143789Sahrens 
1144789Sahrens 	mutex_exit(&arc_reclaim_lock);
1145789Sahrens }
1146789Sahrens 
1147789Sahrens static int
1148789Sahrens arc_reclaim_needed(void)
1149789Sahrens {
1150789Sahrens 	uint64_t extra;
1151789Sahrens 
1152789Sahrens #ifdef _KERNEL
1153789Sahrens 	/*
1154789Sahrens 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1155789Sahrens 	 */
1156789Sahrens 	extra = desfree;
1157789Sahrens 
1158789Sahrens 	/*
1159789Sahrens 	 * check that we're out of range of the pageout scanner.  It starts to
1160789Sahrens 	 * schedule paging if freemem is less than lotsfree and needfree.
1161789Sahrens 	 * lotsfree is the high-water mark for pageout, and needfree is the
1162789Sahrens 	 * number of needed free pages.  We add extra pages here to make sure
1163789Sahrens 	 * the scanner doesn't start up while we're freeing memory.
1164789Sahrens 	 */
1165789Sahrens 	if (freemem < lotsfree + needfree + extra)
1166789Sahrens 		return (1);
1167789Sahrens 
1168789Sahrens 	/*
1169789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1170789Sahrens  * reservations can still succeed. anon_resvmem() checks that the
1171789Sahrens 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1172789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1173789Sahrens 	 * circumstances from getting really dire.
1174789Sahrens 	 */
1175789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1176789Sahrens 		return (1);
1177789Sahrens 
1178*1936Smaybee #if defined(__i386)
1179789Sahrens 	/*
1180789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1181789Sahrens 	 * kernel heap space before we ever run out of available physical
1182789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1183789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1184789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1185789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1186789Sahrens 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1187789Sahrens  * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1188789Sahrens 	 * free)
1189789Sahrens 	 */
1190789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1191789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1192789Sahrens 		return (1);
1193789Sahrens #endif
1194789Sahrens 
1195789Sahrens #else
1196789Sahrens 	if (spa_get_random(100) == 0)
1197789Sahrens 		return (1);
1198789Sahrens #endif
1199789Sahrens 	return (0);
1200789Sahrens }
1201789Sahrens 
1202789Sahrens static void
1203789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1204789Sahrens {
1205789Sahrens 	size_t			i;
1206789Sahrens 	kmem_cache_t		*prev_cache = NULL;
1207789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
1208789Sahrens 
12091484Sek110237 #ifdef _KERNEL
12101484Sek110237 	/*
12111484Sek110237 	 * First purge some DNLC entries, in case the DNLC is using
12121484Sek110237 	 * up too much memory.
12131484Sek110237 	 */
12141505Sek110237 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1215*1936Smaybee 
1216*1936Smaybee #if defined(__i386)
1217*1936Smaybee 	/*
1218*1936Smaybee 	 * Reclaim unused memory from all kmem caches.
1219*1936Smaybee 	 */
1220*1936Smaybee 	kmem_reap();
1221*1936Smaybee #endif
12221484Sek110237 #endif
12231484Sek110237 
1224789Sahrens 	/*
12251544Seschrock 	 * An aggressive reclamation will shrink the cache size as well as
12261544Seschrock 	 * reap free buffers from the arc kmem caches.
1227789Sahrens 	 */
1228789Sahrens 	if (strat == ARC_RECLAIM_AGGR)
12291544Seschrock 		arc_kmem_reclaim();
1230789Sahrens 
1231789Sahrens 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1232789Sahrens 		if (zio_buf_cache[i] != prev_cache) {
1233789Sahrens 			prev_cache = zio_buf_cache[i];
1234789Sahrens 			kmem_cache_reap_now(zio_buf_cache[i]);
1235789Sahrens 		}
1236789Sahrens 	}
12371544Seschrock 	kmem_cache_reap_now(buf_cache);
12381544Seschrock 	kmem_cache_reap_now(hdr_cache);
1239789Sahrens }
1240789Sahrens 
1241789Sahrens static void
1242789Sahrens arc_reclaim_thread(void)
1243789Sahrens {
1244789Sahrens 	clock_t			growtime = 0;
1245789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1246789Sahrens 	callb_cpr_t		cpr;
1247789Sahrens 
1248789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1249789Sahrens 
1250789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1251789Sahrens 	while (arc_thread_exit == 0) {
1252789Sahrens 		if (arc_reclaim_needed()) {
1253789Sahrens 
1254789Sahrens 			if (arc.no_grow) {
1255789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1256789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1257789Sahrens 				} else {
1258789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1259789Sahrens 				}
1260789Sahrens 			} else {
1261789Sahrens 				arc.no_grow = TRUE;
1262789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1263789Sahrens 				membar_producer();
1264789Sahrens 			}
1265789Sahrens 
1266789Sahrens 			/* reset the growth delay for every reclaim */
1267789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
1268789Sahrens 
1269789Sahrens 			arc_kmem_reap_now(last_reclaim);
1270789Sahrens 
1271789Sahrens 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1272789Sahrens 			arc.no_grow = FALSE;
1273789Sahrens 		}
1274789Sahrens 
12751544Seschrock 		if (arc_eviction_list != NULL)
12761544Seschrock 			arc_do_user_evicts();
12771544Seschrock 
1278789Sahrens 		/* block until needed, or one second, whichever is shorter */
1279789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1280789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1281789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1282789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1283789Sahrens 	}
1284789Sahrens 
1285789Sahrens 	arc_thread_exit = 0;
1286789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1287789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1288789Sahrens 	thread_exit();
1289789Sahrens }
1290789Sahrens 
12911544Seschrock /*
12921544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
12931544Seschrock  * the state that we are coming from.  This function is only called
12941544Seschrock  * when we are adding new content to the cache.
12951544Seschrock  */
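/*
 * Editorial worked example (not part of the original source): on a hit in
 * the mru ghost list with bytes = 8K, if the mfu ghost list is twice the
 * size of the mru ghost list then mult = 2, so the mru target arc.p grows
 * by 16K (capped at arc.c).  A hit in the mfu ghost list shrinks arc.p
 * symmetrically (floored at 0).
 */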
1296789Sahrens static void
12971544Seschrock arc_adapt(int bytes, arc_state_t *state)
1298789Sahrens {
12991544Seschrock 	int mult;
13001544Seschrock 
13011544Seschrock 	ASSERT(bytes > 0);
1302789Sahrens 	/*
13031544Seschrock 	 * Adapt the target size of the MRU list:
13041544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
13051544Seschrock 	 *	  the target size of the MRU list.
13061544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
13071544Seschrock 	 *	  the target size of the MFU list by decreasing the
13081544Seschrock 	 *	  target size of the MRU list.
1309789Sahrens 	 */
13101544Seschrock 	if (state == arc.mru_ghost) {
13111544Seschrock 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
13121544Seschrock 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
13131544Seschrock 
13141544Seschrock 		arc.p = MIN(arc.c, arc.p + bytes * mult);
13151544Seschrock 	} else if (state == arc.mfu_ghost) {
13161544Seschrock 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
13171544Seschrock 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
13181544Seschrock 
13191544Seschrock 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
13201544Seschrock 	}
13211544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1322789Sahrens 
1323789Sahrens 	if (arc_reclaim_needed()) {
1324789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1325789Sahrens 		return;
1326789Sahrens 	}
1327789Sahrens 
1328789Sahrens 	if (arc.no_grow)
1329789Sahrens 		return;
1330789Sahrens 
13311544Seschrock 	if (arc.c >= arc.c_max)
13321544Seschrock 		return;
13331544Seschrock 
1334789Sahrens 	/*
13351544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
13361544Seschrock 	 * cache size, increment the target cache size
1337789Sahrens 	 */
13381544Seschrock 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
13391544Seschrock 		atomic_add_64(&arc.c, (int64_t)bytes);
1340789Sahrens 		if (arc.c > arc.c_max)
1341789Sahrens 			arc.c = arc.c_max;
13421544Seschrock 		else if (state == arc.anon)
13431544Seschrock 			atomic_add_64(&arc.p, (int64_t)bytes);
13441544Seschrock 		if (arc.p > arc.c)
13451544Seschrock 			arc.p = arc.c;
1346789Sahrens 	}
13471544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1348789Sahrens }
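
/*
 * Illustrative sketch (an addition for exposition, never compiled): the
 * same adaptation rule as arc_adapt() above, restated as a pure function
 * so the arithmetic is easy to follow.  The sizes in the example at the
 * bottom are made-up assumptions.
 */
#if 0
static uint64_t
example_adapt_p(uint64_t c, uint64_t p, uint64_t mru_ghost_sz,
    uint64_t mfu_ghost_sz, int bytes, boolean_t hit_mru_ghost)
{
	int mult;

	if (hit_mru_ghost) {
		/* grow the MRU target; faster when the MFU ghost dominates */
		mult = (mru_ghost_sz >= mfu_ghost_sz) ?
		    1 : (int)(mfu_ghost_sz / mru_ghost_sz);
		return (MIN(c, p + (uint64_t)(bytes * mult)));
	}
	/* MFU ghost hit: shrink the MRU target symmetrically */
	mult = (mfu_ghost_sz >= mru_ghost_sz) ?
	    1 : (int)(mru_ghost_sz / mfu_ghost_sz);
	return ((uint64_t)MAX(0, (int64_t)p - bytes * mult));
}
/*
 * Example: with c = 1000, p = 500, mru_ghost = 100, mfu_ghost = 400 and a
 * 10-byte block hitting in the MRU ghost list, mult = 4 and p grows to 540;
 * a hit in the MFU ghost list with the sizes swapped would shrink p by the
 * same amount.
 */
#endif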
1349789Sahrens 
1350789Sahrens /*
13511544Seschrock  * Check if the cache has reached its limits and eviction is required
13521544Seschrock  * prior to insertion.
1353789Sahrens  */
1354789Sahrens static int
1355789Sahrens arc_evict_needed()
1356789Sahrens {
1357789Sahrens 	if (arc_reclaim_needed())
1358789Sahrens 		return (1);
1359789Sahrens 
13601544Seschrock 	return (arc.size > arc.c);
1361789Sahrens }
1362789Sahrens 
1363789Sahrens /*
1364789Sahrens  * The state, supplied as the first argument, is going to have something
1365789Sahrens  * inserted on its behalf. So, determine which cache must be victimized to
1366789Sahrens  * satisfy an insertion for this state.  We have the following cases:
1367789Sahrens  *
13681544Seschrock  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1369789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1370789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1371789Sahrens  *
13721544Seschrock  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1373789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1374789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1375789Sahrens  * entries.
1376789Sahrens  *
13771544Seschrock  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1378789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1379789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1380789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1381789Sahrens  *
13821544Seschrock  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1383789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1384789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1385789Sahrens  */
1386789Sahrens static void
1387789Sahrens arc_evict_for_state(arc_state_t *state, uint64_t bytes)
1388789Sahrens {
1389789Sahrens 	uint64_t	mru_used;
1390789Sahrens 	uint64_t	mfu_space;
1391789Sahrens 	uint64_t	evicted;
1392789Sahrens 
13931544Seschrock 	ASSERT(state == arc.mru || state == arc.mfu);
1394789Sahrens 
13951544Seschrock 	if (state == arc.mru) {
13961544Seschrock 		mru_used = arc.anon->size + arc.mru->size;
1397789Sahrens 		if (arc.p > mru_used) {
1398789Sahrens 			/* case 1 */
13991544Seschrock 			evicted = arc_evict(arc.mfu, bytes);
1400789Sahrens 			if (evicted < bytes) {
1401789Sahrens 				arc_adjust();
1402789Sahrens 			}
1403789Sahrens 		} else {
1404789Sahrens 			/* case 2 */
14051544Seschrock 			evicted = arc_evict(arc.mru, bytes);
1406789Sahrens 			if (evicted < bytes) {
1407789Sahrens 				arc_adjust();
1408789Sahrens 			}
1409789Sahrens 		}
1410789Sahrens 	} else {
14111544Seschrock 		/* MFU case */
1412789Sahrens 		mfu_space = arc.c - arc.p;
14131544Seschrock 		if (mfu_space > arc.mfu->size) {
1414789Sahrens 			/* case 3 */
14151544Seschrock 			evicted = arc_evict(arc.mru, bytes);
1416789Sahrens 			if (evicted < bytes) {
1417789Sahrens 				arc_adjust();
1418789Sahrens 			}
1419789Sahrens 		} else {
1420789Sahrens 			/* case 4 */
14211544Seschrock 			evicted = arc_evict(arc.mfu, bytes);
1422789Sahrens 			if (evicted < bytes) {
1423789Sahrens 				arc_adjust();
1424789Sahrens 			}
1425789Sahrens 		}
1426789Sahrens 	}
1427789Sahrens }
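
/*
 * Illustrative sketch (an addition for exposition, never compiled): the
 * victim-selection rule that the four cases above reduce to, written as a
 * single pure function over the current list sizes.
 */
#if 0
static arc_state_t *
example_pick_victim(arc_state_t *state)
{
	if (state == arc.mru) {
		/* cases 1 and 2: does the MRU still fit under its target p? */
		return ((arc.p > arc.anon->size + arc.mru->size) ?
		    arc.mfu : arc.mru);
	}
	/* cases 3 and 4: is there still room on the MFU side (c - p)? */
	return ((arc.c - arc.p > arc.mfu->size) ? arc.mru : arc.mfu);
}
#endif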
1428789Sahrens 
1429789Sahrens /*
1430789Sahrens  * This routine is called whenever a buffer is accessed.
14311544Seschrock  * NOTE: the hash lock is dropped in this function.
1432789Sahrens  */
1433789Sahrens static void
14341544Seschrock arc_access_and_exit(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1435789Sahrens {
14361544Seschrock 	arc_state_t	*evict_state = NULL;
14371544Seschrock 	int		blksz;
1438789Sahrens 
1439789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1440789Sahrens 
1441789Sahrens 	blksz = buf->b_size;
1442789Sahrens 
1443789Sahrens 	if (buf->b_state == arc.anon) {
1444789Sahrens 		/*
1445789Sahrens 		 * This buffer is not in the cache, and does not
1446789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
1447789Sahrens 		 * to the MRU state.
1448789Sahrens 		 */
1449789Sahrens 
14501544Seschrock 		arc_adapt(blksz, arc.anon);
14511544Seschrock 		if (arc_evict_needed())
14521544Seschrock 			evict_state = arc.mru;
1453789Sahrens 
1454789Sahrens 		ASSERT(buf->b_arc_access == 0);
1455789Sahrens 		buf->b_arc_access = lbolt;
14561544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
14571544Seschrock 		arc_change_state(arc.mru, buf, hash_lock);
1458789Sahrens 
14591544Seschrock 	} else if (buf->b_state == arc.mru) {
1460789Sahrens 		/*
1461789Sahrens 		 * If this buffer is in the MRU-top state and has the prefetch
1462789Sahrens 		 * flag, the first read was actually part of a prefetch.  In
1463789Sahrens 		 * this situation, we simply want to clear the flag and return.
1464789Sahrens 		 * A subsequent access should bump this into the MFU state.
1465789Sahrens 		 */
1466789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
1467789Sahrens 			buf->b_flags &= ~ARC_PREFETCH;
14681544Seschrock 			atomic_add_64(&arc.mru->hits, 1);
14691544Seschrock 			mutex_exit(hash_lock);
1470789Sahrens 			return;
1471789Sahrens 		}
1472789Sahrens 
1473789Sahrens 		/*
1474789Sahrens 		 * This buffer has been "accessed" only once so far,
1475789Sahrens 		 * but it is still in the cache.  If enough time has passed
1476789Sahrens 		 * since it was cached, move it to the MFU state.
1477789Sahrens 		 */
1478789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1479789Sahrens 			/*
1480789Sahrens 			 * More than 125ms have passed since we
1481789Sahrens 			 * instantiated this buffer.  Move it to the
1482789Sahrens 			 * most frequently used state.
1483789Sahrens 			 */
1484789Sahrens 			buf->b_arc_access = lbolt;
14851544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
14861544Seschrock 			arc_change_state(arc.mfu, buf, hash_lock);
1487789Sahrens 		}
14881544Seschrock 		atomic_add_64(&arc.mru->hits, 1);
14891544Seschrock 	} else if (buf->b_state == arc.mru_ghost) {
1490789Sahrens 		arc_state_t	*new_state;
1491789Sahrens 		/*
1492789Sahrens 		 * This buffer has been "accessed" recently, but
1493789Sahrens 		 * was evicted from the cache.  Move it to the
1494789Sahrens 		 * MFU state.
1495789Sahrens 		 */
1496789Sahrens 
1497789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
14981544Seschrock 			new_state = arc.mru;
14991544Seschrock 			buf->b_flags &= ~ARC_PREFETCH;
15001544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1501789Sahrens 		} else {
15021544Seschrock 			new_state = arc.mfu;
15031544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1504789Sahrens 		}
1505789Sahrens 
15061544Seschrock 		arc_adapt(blksz, arc.mru_ghost);
15071544Seschrock 		if (arc_evict_needed())
15081544Seschrock 			evict_state = new_state;
1509789Sahrens 
1510789Sahrens 		buf->b_arc_access = lbolt;
1511789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1512789Sahrens 
15131544Seschrock 		atomic_add_64(&arc.mru_ghost->hits, 1);
15141544Seschrock 	} else if (buf->b_state == arc.mfu) {
1515789Sahrens 		/*
1516789Sahrens 		 * This buffer has been accessed more than once and is
1517789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1518789Sahrens 		 *
1519789Sahrens 		 * NOTE: the add_reference() that occurred when we did
1520789Sahrens 		 * the arc_read() should have kicked this off the list,
1521789Sahrens 		 * so even if it was a prefetch, it will be put back at
1522789Sahrens 		 * the head of the list when we remove_reference().
1523789Sahrens 		 */
15241544Seschrock 		atomic_add_64(&arc.mfu->hits, 1);
15251544Seschrock 	} else if (buf->b_state == arc.mfu_ghost) {
1526789Sahrens 		/*
1527789Sahrens 		 * This buffer has been accessed more than once but has
1528789Sahrens 		 * been evicted from the cache.  Move it back to the
1529789Sahrens 		 * MFU state.
1530789Sahrens 		 */
1531789Sahrens 
15321544Seschrock 		arc_adapt(blksz, arc.mfu_ghost);
15331544Seschrock 		if (arc_evict_needed())
15341544Seschrock 			evict_state = arc.mfu;
1535789Sahrens 
1536789Sahrens 		buf->b_arc_access = lbolt;
15371544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
15381544Seschrock 		arc_change_state(arc.mfu, buf, hash_lock);
1539789Sahrens 
15401544Seschrock 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1541789Sahrens 	} else {
1542789Sahrens 		ASSERT(!"invalid arc state");
1543789Sahrens 	}
1544789Sahrens 
15451544Seschrock 	mutex_exit(hash_lock);
15461544Seschrock 	if (evict_state)
15471544Seschrock 		arc_evict_for_state(evict_state, blksz);
1548789Sahrens }
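
/*
 * Summary of the transitions implemented above (added for exposition).  A
 * block typically moves anon -> mru on first insert, mru -> mfu on a second
 * access that comes at least ARC_MINTIME after it entered the MRU list,
 * and, once evicted, mru_ghost/mfu_ghost hits bring it back into mfu (or
 * mru for prefetches) while also re-tuning arc.p via arc_adapt().
 */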
1549789Sahrens 
1550789Sahrens /* a generic arc_done_func_t which you can use */
1551789Sahrens /* ARGSUSED */
1552789Sahrens void
1553789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1554789Sahrens {
1555789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
15561544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1557789Sahrens }
1558789Sahrens 
1559789Sahrens /* a generic arc_done_func_t which you can use */
1560789Sahrens void
1561789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1562789Sahrens {
1563789Sahrens 	arc_buf_t **bufp = arg;
1564789Sahrens 	if (zio && zio->io_error) {
15651544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1566789Sahrens 		*bufp = NULL;
1567789Sahrens 	} else {
1568789Sahrens 		*bufp = buf;
1569789Sahrens 	}
1570789Sahrens }
1571789Sahrens 
1572789Sahrens static void
1573789Sahrens arc_read_done(zio_t *zio)
1574789Sahrens {
15751589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1576789Sahrens 	arc_buf_t	*buf;
1577789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1578789Sahrens 	kmutex_t	*hash_lock;
1579789Sahrens 	arc_callback_t	*callback_list, *acb;
1580789Sahrens 	int		freeable = FALSE;
1581789Sahrens 
1582789Sahrens 	buf = zio->io_private;
1583789Sahrens 	hdr = buf->b_hdr;
1584789Sahrens 
15851589Smaybee 	/*
15861589Smaybee 	 * The hdr was inserted into hash-table and removed from lists
15871589Smaybee 	 * prior to starting I/O.  We should find this header, since
15881589Smaybee 	 * it's in the hash table, and it should be legit since it's
15891589Smaybee 	 * not possible to evict it during the I/O.  The only possible
15901589Smaybee 	 * reason for it not to be found is if we were freed during the
15911589Smaybee 	 * read.
15921589Smaybee 	 */
15931589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1594789Sahrens 		    &hash_lock);
1595789Sahrens 
15961589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
15971589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1598789Sahrens 
1599789Sahrens 	/* byteswap if necessary */
1600789Sahrens 	callback_list = hdr->b_acb;
1601789Sahrens 	ASSERT(callback_list != NULL);
1602789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1603789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1604789Sahrens 
1605789Sahrens 	/* create copies of the data buffer for the callers */
1606789Sahrens 	abuf = buf;
1607789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1608789Sahrens 		if (acb->acb_done) {
1609789Sahrens 			if (abuf == NULL) {
1610789Sahrens 				abuf = kmem_cache_alloc(buf_cache, KM_SLEEP);
16111544Seschrock 				abuf->b_data = arc_data_copy(hdr, buf->b_data);
1612789Sahrens 				abuf->b_hdr = hdr;
16131544Seschrock 				abuf->b_efunc = NULL;
16141544Seschrock 				abuf->b_private = NULL;
1615789Sahrens 				abuf->b_next = hdr->b_buf;
1616789Sahrens 				hdr->b_buf = abuf;
16171544Seschrock 				hdr->b_datacnt += 1;
1618789Sahrens 			}
1619789Sahrens 			acb->acb_buf = abuf;
1620789Sahrens 			abuf = NULL;
1621789Sahrens 		} else {
1622789Sahrens 			/*
1623789Sahrens 			 * The caller did not provide a callback function.
1624789Sahrens 			 * In this case, we should just remove the reference.
1625789Sahrens 			 */
1626789Sahrens 			if (HDR_FREED_IN_READ(hdr)) {
1627789Sahrens 				ASSERT3P(hdr->b_state, ==, arc.anon);
1628789Sahrens 				(void) refcount_remove(&hdr->b_refcnt,
1629789Sahrens 				    acb->acb_private);
1630789Sahrens 			} else {
1631789Sahrens 				(void) remove_reference(hdr, hash_lock,
1632789Sahrens 				    acb->acb_private);
1633789Sahrens 			}
1634789Sahrens 		}
1635789Sahrens 	}
1636789Sahrens 	hdr->b_acb = NULL;
1637789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
16381544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
16391544Seschrock 	if (abuf == buf)
16401544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1641789Sahrens 
1642789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1643789Sahrens 
1644789Sahrens 	if (zio->io_error != 0) {
1645789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1646789Sahrens 		if (hdr->b_state != arc.anon)
1647789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
16481544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
16491544Seschrock 			buf_hash_remove(hdr);
1650789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
16511544Seschrock 		/* translate checksum errors into IO errors */
16521544Seschrock 		if (zio->io_error == ECKSUM)
16531544Seschrock 			zio->io_error = EIO;
1654789Sahrens 	}
1655789Sahrens 
16561544Seschrock 	/*
16571544Seschrock 	 * Broadcast before we drop the hash_lock.  This is less efficient,
16581544Seschrock 	 * but avoids the possibility that the hdr (and hence the cv) might
16591544Seschrock 	 * be freed before we get to the cv_broadcast().
16601544Seschrock 	 */
16611544Seschrock 	cv_broadcast(&hdr->b_cv);
16621544Seschrock 
16631589Smaybee 	if (hash_lock) {
1664789Sahrens 		/*
1665789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1666789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1667789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1668789Sahrens 		 * getting confused).
1669789Sahrens 		 */
1670789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
16711544Seschrock 			arc_access_and_exit(hdr, hash_lock);
16721544Seschrock 		else
16731544Seschrock 			mutex_exit(hash_lock);
1674789Sahrens 	} else {
1675789Sahrens 		/*
1676789Sahrens 		 * This block was freed while we waited for the read to
1677789Sahrens 		 * complete.  It has been removed from the hash table and
1678789Sahrens 		 * moved to the anonymous state (so that it won't show up
1679789Sahrens 		 * in the cache).
1680789Sahrens 		 */
1681789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1682789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1683789Sahrens 	}
1684789Sahrens 
1685789Sahrens 	/* execute each callback and free its structure */
1686789Sahrens 	while ((acb = callback_list) != NULL) {
1687789Sahrens 		if (acb->acb_done)
1688789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1689789Sahrens 
1690789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1691789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1692789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1693789Sahrens 		}
1694789Sahrens 
1695789Sahrens 		callback_list = acb->acb_next;
1696789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1697789Sahrens 	}
1698789Sahrens 
1699789Sahrens 	if (freeable)
17001544Seschrock 		arc_hdr_destroy(hdr);
1701789Sahrens }
1702789Sahrens 
1703789Sahrens /*
1704789Sahrens  * "Read" the block block at the specified DVA (in bp) via the
1705789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1706789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1707789Sahrens  * in the callback will be NULL in this case, since no IO was
1708789Sahrens  * required.  If the block is not in the cache pass the read request
1709789Sahrens  * on to the spa with a substitute callback function, so that the
1710789Sahrens  * requested block will be added to the cache.
1711789Sahrens  *
1712789Sahrens  * If a read request arrives for a block that has a read in-progress,
1713789Sahrens  * either wait for the in-progress read to complete (and return the
1714789Sahrens  * results); or, if this is a read with a "done" func, add a record
1715789Sahrens  * to the read to invoke the "done" func when the read completes,
1716789Sahrens  * and return; or just return.
1717789Sahrens  *
1718789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1719789Sahrens  * for readers of this block.
1720789Sahrens  */
1721789Sahrens int
1722789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1723789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
17241544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
1725789Sahrens {
1726789Sahrens 	arc_buf_hdr_t *hdr;
1727789Sahrens 	arc_buf_t *buf;
1728789Sahrens 	kmutex_t *hash_lock;
1729789Sahrens 	zio_t	*rzio;
1730789Sahrens 
1731789Sahrens top:
1732789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
17331544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
1734789Sahrens 
1735789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
1736789Sahrens 			if ((arc_flags & ARC_NOWAIT) && done) {
1737789Sahrens 				arc_callback_t	*acb = NULL;
1738789Sahrens 
1739789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1740789Sahrens 				    KM_SLEEP);
1741789Sahrens 				acb->acb_done = done;
1742789Sahrens 				acb->acb_private = private;
1743789Sahrens 				acb->acb_byteswap = swap;
1744789Sahrens 				if (pio != NULL)
1745789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1746789Sahrens 					    spa, NULL, NULL, flags);
1747789Sahrens 
1748789Sahrens 				ASSERT(acb->acb_done != NULL);
1749789Sahrens 				acb->acb_next = hdr->b_acb;
1750789Sahrens 				hdr->b_acb = acb;
1751789Sahrens 				add_reference(hdr, hash_lock, private);
1752789Sahrens 				mutex_exit(hash_lock);
1753789Sahrens 				return (0);
1754789Sahrens 			} else if (arc_flags & ARC_WAIT) {
1755789Sahrens 				cv_wait(&hdr->b_cv, hash_lock);
1756789Sahrens 				mutex_exit(hash_lock);
1757789Sahrens 				goto top;
1758789Sahrens 			}
1759789Sahrens 			mutex_exit(hash_lock);
1760789Sahrens 			return (0);
1761789Sahrens 		}
1762789Sahrens 
17631544Seschrock 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1764789Sahrens 
17651544Seschrock 		if (done) {
17661544Seschrock 			/*
17671544Seschrock 			 * If this block is already in use, create a new
17681544Seschrock 			 * copy of the data so that we will be guaranteed
17691544Seschrock 			 * that arc_release() will always succeed.
17701544Seschrock 			 */
17711544Seschrock 			buf = hdr->b_buf;
17721544Seschrock 			ASSERT(buf);
17731544Seschrock 			ASSERT(buf->b_data);
17741544Seschrock 			if (!HDR_BUF_AVAILABLE(hdr)) {
17751544Seschrock 				void *data = arc_data_copy(hdr, buf->b_data);
17761544Seschrock 				buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
17771544Seschrock 				buf->b_hdr = hdr;
17781544Seschrock 				buf->b_data = data;
17791544Seschrock 				buf->b_efunc = NULL;
17801544Seschrock 				buf->b_private = NULL;
17811544Seschrock 				buf->b_next = hdr->b_buf;
17821544Seschrock 				hdr->b_buf = buf;
17831544Seschrock 				hdr->b_datacnt += 1;
17841544Seschrock 			} else {
17851544Seschrock 				ASSERT(buf->b_efunc == NULL);
17861544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
17871544Seschrock 			}
1788789Sahrens 			add_reference(hdr, hash_lock, private);
1789789Sahrens 		}
1790789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
17911544Seschrock 		arc_access_and_exit(hdr, hash_lock);
1792789Sahrens 		atomic_add_64(&arc.hits, 1);
1793789Sahrens 		if (done)
1794789Sahrens 			done(NULL, buf, private);
1795789Sahrens 	} else {
1796789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1797789Sahrens 		arc_callback_t	*acb;
1798789Sahrens 
1799789Sahrens 		if (hdr == NULL) {
1800789Sahrens 			/* this block is not in the cache */
1801789Sahrens 			arc_buf_hdr_t	*exists;
1802789Sahrens 
1803789Sahrens 			buf = arc_buf_alloc(spa, size, private);
1804789Sahrens 			hdr = buf->b_hdr;
1805789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1806789Sahrens 			hdr->b_birth = bp->blk_birth;
1807789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1808789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1809789Sahrens 			if (exists) {
1810789Sahrens 				/* somebody beat us to the hash insert */
1811789Sahrens 				mutex_exit(hash_lock);
1812789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1813789Sahrens 				hdr->b_birth = 0;
1814789Sahrens 				hdr->b_cksum0 = 0;
18151544Seschrock 				(void) arc_buf_remove_ref(buf, private);
1816789Sahrens 				goto top; /* restart the IO request */
1817789Sahrens 			}
1818789Sahrens 
1819789Sahrens 		} else {
1820789Sahrens 			/* this block is in the ghost cache */
18211544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
18221544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1823789Sahrens 			add_reference(hdr, hash_lock, private);
18241544Seschrock 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
1825789Sahrens 
18261544Seschrock 			ASSERT(hdr->b_buf == NULL);
1827789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
18281544Seschrock 			buf->b_hdr = hdr;
18291544Seschrock 			buf->b_efunc = NULL;
18301544Seschrock 			buf->b_private = NULL;
18311544Seschrock 			buf->b_next = NULL;
18321544Seschrock 			hdr->b_buf = buf;
1833789Sahrens 			buf->b_data = zio_buf_alloc(hdr->b_size);
1834789Sahrens 			atomic_add_64(&arc.size, hdr->b_size);
18351544Seschrock 			ASSERT(hdr->b_datacnt == 0);
18361544Seschrock 			hdr->b_datacnt = 1;
1837789Sahrens 		}
1838789Sahrens 
1839789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1840789Sahrens 		acb->acb_done = done;
1841789Sahrens 		acb->acb_private = private;
1842789Sahrens 		acb->acb_byteswap = swap;
1843789Sahrens 
1844789Sahrens 		ASSERT(hdr->b_acb == NULL);
1845789Sahrens 		hdr->b_acb = acb;
1846789Sahrens 
1847789Sahrens 		/*
1848789Sahrens 		 * If this DVA is part of a prefetch, mark the buf
1849789Sahrens 		 * header with the prefetch flag
1850789Sahrens 		 */
1851789Sahrens 		if (arc_flags & ARC_PREFETCH)
1852789Sahrens 			hdr->b_flags |= ARC_PREFETCH;
1853789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1854789Sahrens 
1855789Sahrens 		/*
1856789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
1857789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
1858789Sahrens 		 * the header will be marked as I/O in progress and have an
1859789Sahrens 		 * attached buffer.  At this point, anybody who finds this
1860789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
1861789Sahrens 		 */
1862789Sahrens 
18631544Seschrock 		if (GHOST_STATE(hdr->b_state))
18641544Seschrock 			arc_access_and_exit(hdr, hash_lock);
18651544Seschrock 		else
18661544Seschrock 			mutex_exit(hash_lock);
1867789Sahrens 
1868789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
18691596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
18701596Sahrens 		    zbookmark_t *, zb);
1871789Sahrens 		atomic_add_64(&arc.misses, 1);
18721544Seschrock 
1873789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
18741544Seschrock 		    arc_read_done, buf, priority, flags, zb);
1875789Sahrens 
1876789Sahrens 		if (arc_flags & ARC_WAIT)
1877789Sahrens 			return (zio_wait(rzio));
1878789Sahrens 
1879789Sahrens 		ASSERT(arc_flags & ARC_NOWAIT);
1880789Sahrens 		zio_nowait(rzio);
1881789Sahrens 	}
1882789Sahrens 	return (0);
1883789Sahrens }
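
/*
 * Minimal usage sketch (added for exposition, never compiled): a
 * synchronous, cached read through arc_read() using the generic
 * arc_getbuf_func() callback above.  The byteswap function, zio priority
 * and zio flags shown here are placeholder assumptions, not a statement of
 * what real callers pass.
 */
#if 0
static int
example_sync_read(spa_t *spa, blkptr_t *bp, zbookmark_t *zb, void *dest)
{
	arc_buf_t *abuf = NULL;
	int err;

	err = arc_read(NULL, spa, bp, byteswap_uint64_array,
	    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
	    ZIO_FLAG_CANFAIL, ARC_WAIT, zb);
	if (err != 0 || abuf == NULL)
		return (err ? err : EIO);

	/* arc_getbuf_func() handed back a referenced buffer; copy, release */
	bcopy(abuf->b_data, dest, abuf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
	return (0);
}
#endif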
1884789Sahrens 
1885789Sahrens /*
1886789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
1887789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
1888789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
1889789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1890789Sahrens  */
1891789Sahrens int
1892789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1893789Sahrens {
1894789Sahrens 	arc_buf_hdr_t *hdr;
1895789Sahrens 	kmutex_t *hash_mtx;
1896789Sahrens 	int rc = 0;
1897789Sahrens 
1898789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1899789Sahrens 
19001544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
19011544Seschrock 		arc_buf_t *buf = hdr->b_buf;
19021544Seschrock 
19031544Seschrock 		ASSERT(buf);
19041544Seschrock 		while (buf->b_data == NULL) {
19051544Seschrock 			buf = buf->b_next;
19061544Seschrock 			ASSERT(buf);
19071544Seschrock 		}
19081544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
19091544Seschrock 	} else {
1910789Sahrens 		rc = ENOENT;
19111544Seschrock 	}
1912789Sahrens 
1913789Sahrens 	if (hash_mtx)
1914789Sahrens 		mutex_exit(hash_mtx);
1915789Sahrens 
1916789Sahrens 	return (rc);
1917789Sahrens }
1918789Sahrens 
19191544Seschrock void
19201544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
19211544Seschrock {
19221544Seschrock 	ASSERT(buf->b_hdr != NULL);
19231544Seschrock 	ASSERT(buf->b_hdr->b_state != arc.anon);
19241544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
19251544Seschrock 	buf->b_efunc = func;
19261544Seschrock 	buf->b_private = private;
19271544Seschrock }
19281544Seschrock 
19291544Seschrock /*
19301544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
19311544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
19321544Seschrock  * is not yet in the evicted state, it will be put there.
19331544Seschrock  */
19341544Seschrock int
19351544Seschrock arc_buf_evict(arc_buf_t *buf)
19361544Seschrock {
19371544Seschrock 	arc_buf_hdr_t *hdr;
19381544Seschrock 	kmutex_t *hash_lock;
19391544Seschrock 	arc_buf_t **bufp;
19401544Seschrock 
19411544Seschrock 	mutex_enter(&arc_eviction_mtx);
19421544Seschrock 	hdr = buf->b_hdr;
19431544Seschrock 	if (hdr == NULL) {
19441544Seschrock 		/*
19451544Seschrock 		 * We are in arc_do_user_evicts().
19461544Seschrock 		 * NOTE: We can't be in arc_buf_add_ref() because
19471544Seschrock 		 * that would violate the interface rules.
19481544Seschrock 		 */
19491544Seschrock 		ASSERT(buf->b_data == NULL);
19501544Seschrock 		mutex_exit(&arc_eviction_mtx);
19511544Seschrock 		return (0);
19521544Seschrock 	} else if (buf->b_data == NULL) {
19531819Smaybee 		arc_buf_t copy = *buf; /* structure assignment */
19541544Seschrock 		/*
19551819Smaybee 		 * We are on the eviction list.  Process this buffer
19561819Smaybee 		 * now but let arc_do_user_evicts() do the reaping.
19571544Seschrock 		 */
19581819Smaybee 		buf->b_efunc = NULL;
19591819Smaybee 		buf->b_hdr = NULL;
19601544Seschrock 		mutex_exit(&arc_eviction_mtx);
19611819Smaybee 		VERIFY(copy.b_efunc(&copy) == 0);
19621819Smaybee 		return (1);
19631544Seschrock 	} else {
19641544Seschrock 		/*
19651544Seschrock 		 * Prevent a race with arc_evict()
19661544Seschrock 		 */
19671544Seschrock 		ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
19681544Seschrock 		buf->b_hdr = NULL;
19691544Seschrock 	}
19701544Seschrock 	mutex_exit(&arc_eviction_mtx);
19711544Seschrock 
19721544Seschrock 	hash_lock = HDR_LOCK(hdr);
19731544Seschrock 	mutex_enter(hash_lock);
19741544Seschrock 
19751544Seschrock 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
19761544Seschrock 
19771544Seschrock 	/*
19781544Seschrock 	 * Pull this buffer off of the hdr
19791544Seschrock 	 */
19801544Seschrock 	bufp = &hdr->b_buf;
19811544Seschrock 	while (*bufp != buf)
19821544Seschrock 		bufp = &(*bufp)->b_next;
19831544Seschrock 	*bufp = buf->b_next;
19841544Seschrock 
19851544Seschrock 	ASSERT(buf->b_data != NULL);
19861544Seschrock 	buf->b_hdr = hdr;
19871544Seschrock 	arc_buf_destroy(buf, FALSE);
19881544Seschrock 
19891544Seschrock 	if (hdr->b_datacnt == 0) {
19901544Seschrock 		arc_state_t *old_state = hdr->b_state;
19911544Seschrock 		arc_state_t *evicted_state;
19921544Seschrock 
19931544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
19941544Seschrock 
19951544Seschrock 		evicted_state =
19961544Seschrock 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
19971544Seschrock 
19981544Seschrock 		mutex_enter(&old_state->mtx);
19991544Seschrock 		mutex_enter(&evicted_state->mtx);
20001544Seschrock 
20011544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
20021544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
20031544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
20041544Seschrock 
20051544Seschrock 		mutex_exit(&evicted_state->mtx);
20061544Seschrock 		mutex_exit(&old_state->mtx);
20071544Seschrock 	}
20081544Seschrock 	mutex_exit(hash_lock);
20091819Smaybee 
20101544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
20111544Seschrock 	buf->b_efunc = NULL;
20121544Seschrock 	buf->b_private = NULL;
20131544Seschrock 	buf->b_hdr = NULL;
20141544Seschrock 	kmem_cache_free(buf_cache, buf);
20151544Seschrock 	return (1);
20161544Seschrock }
20171544Seschrock 
2018789Sahrens /*
2019789Sahrens  * Release this buffer from the cache.  This must be done
2020789Sahrens  * after a read and prior to modifying the buffer contents.
2021789Sahrens  * If the buffer has more than one reference, we must make
2022789Sahrens  * a new hdr for the buffer.
2023789Sahrens  */
2024789Sahrens void
2025789Sahrens arc_release(arc_buf_t *buf, void *tag)
2026789Sahrens {
2027789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2028789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2029789Sahrens 
2030789Sahrens 	/* this buffer is not on any list */
2031789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2032789Sahrens 
2033789Sahrens 	if (hdr->b_state == arc.anon) {
2034789Sahrens 		/* this buffer is already released */
2035789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2036789Sahrens 		ASSERT(BUF_EMPTY(hdr));
20371544Seschrock 		ASSERT(buf->b_efunc == NULL);
2038789Sahrens 		return;
2039789Sahrens 	}
2040789Sahrens 
2041789Sahrens 	mutex_enter(hash_lock);
2042789Sahrens 
20431544Seschrock 	/*
20441544Seschrock 	 * Do we have more than one buf?
20451544Seschrock 	 */
20461544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2047789Sahrens 		arc_buf_hdr_t *nhdr;
2048789Sahrens 		arc_buf_t **bufp;
2049789Sahrens 		uint64_t blksz = hdr->b_size;
2050789Sahrens 		spa_t *spa = hdr->b_spa;
2051789Sahrens 
20521544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2053789Sahrens 		/*
2054789Sahrens 		 * Pull the data off of this buf and attach it to
2055789Sahrens 		 * a new anonymous buf.
2056789Sahrens 		 */
20571544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2058789Sahrens 		bufp = &hdr->b_buf;
20591544Seschrock 		while (*bufp != buf)
2060789Sahrens 			bufp = &(*bufp)->b_next;
2061789Sahrens 		*bufp = (*bufp)->b_next;
20621544Seschrock 
2063789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2064789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
20651544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
20661544Seschrock 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
20671544Seschrock 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
20681544Seschrock 		}
20691544Seschrock 		hdr->b_datacnt -= 1;
20701544Seschrock 
2071789Sahrens 		mutex_exit(hash_lock);
2072789Sahrens 
2073789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2074789Sahrens 		nhdr->b_size = blksz;
2075789Sahrens 		nhdr->b_spa = spa;
2076789Sahrens 		nhdr->b_buf = buf;
2077789Sahrens 		nhdr->b_state = arc.anon;
2078789Sahrens 		nhdr->b_arc_access = 0;
2079789Sahrens 		nhdr->b_flags = 0;
20801544Seschrock 		nhdr->b_datacnt = 1;
2081789Sahrens 		buf->b_hdr = nhdr;
2082789Sahrens 		buf->b_next = NULL;
2083789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2084789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
2085789Sahrens 
2086789Sahrens 		hdr = nhdr;
2087789Sahrens 	} else {
20881544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2089789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2090789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2091789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
2092789Sahrens 		hdr->b_arc_access = 0;
2093789Sahrens 		mutex_exit(hash_lock);
2094789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2095789Sahrens 		hdr->b_birth = 0;
2096789Sahrens 		hdr->b_cksum0 = 0;
2097789Sahrens 	}
20981544Seschrock 	buf->b_efunc = NULL;
20991544Seschrock 	buf->b_private = NULL;
2100789Sahrens }
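
/*
 * In short (added for exposition): if the hdr is shared by other bufs, the
 * released buf's data is re-homed under a fresh anonymous hdr and the old
 * hdr keeps the remaining bufs; if this was the only buf, the existing hdr
 * itself is moved to the anonymous state and its identity (dva/birth/cksum)
 * is cleared.
 */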
2101789Sahrens 
2102789Sahrens int
2103789Sahrens arc_released(arc_buf_t *buf)
2104789Sahrens {
21051544Seschrock 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
21061544Seschrock }
21071544Seschrock 
21081544Seschrock int
21091544Seschrock arc_has_callback(arc_buf_t *buf)
21101544Seschrock {
21111544Seschrock 	return (buf->b_efunc != NULL);
2112789Sahrens }
2113789Sahrens 
21141544Seschrock #ifdef ZFS_DEBUG
21151544Seschrock int
21161544Seschrock arc_referenced(arc_buf_t *buf)
21171544Seschrock {
21181544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
21191544Seschrock }
21201544Seschrock #endif
21211544Seschrock 
2122789Sahrens static void
2123789Sahrens arc_write_done(zio_t *zio)
2124789Sahrens {
2125789Sahrens 	arc_buf_t *buf;
2126789Sahrens 	arc_buf_hdr_t *hdr;
2127789Sahrens 	arc_callback_t *acb;
2128789Sahrens 
2129789Sahrens 	buf = zio->io_private;
2130789Sahrens 	hdr = buf->b_hdr;
2131789Sahrens 	acb = hdr->b_acb;
2132789Sahrens 	hdr->b_acb = NULL;
21331544Seschrock 	ASSERT(acb != NULL);
2134789Sahrens 
2135789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2136789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2137789Sahrens 
2138789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2139789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2140789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
21411544Seschrock 	/*
21421544Seschrock 	 * If the block to be written was all-zero, we may have
21431544Seschrock 	 * compressed it away.  In this case no write was performed
21441544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
21451544Seschrock 	 * must therefore remain anonymous (and uncached).
21461544Seschrock 	 */
2147789Sahrens 	if (!BUF_EMPTY(hdr)) {
2148789Sahrens 		arc_buf_hdr_t *exists;
2149789Sahrens 		kmutex_t *hash_lock;
2150789Sahrens 
2151789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2152789Sahrens 		if (exists) {
2153789Sahrens 			/*
2154789Sahrens 			 * This can only happen if we overwrite for
2155789Sahrens 			 * sync-to-convergence, because we remove
2156789Sahrens 			 * buffers from the hash table when we arc_free().
2157789Sahrens 			 */
2158789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2159789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2160789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2161789Sahrens 			    zio->io_bp->blk_birth);
2162789Sahrens 
2163789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2164789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
2165789Sahrens 			mutex_exit(hash_lock);
21661544Seschrock 			arc_hdr_destroy(exists);
2167789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2168789Sahrens 			ASSERT3P(exists, ==, NULL);
2169789Sahrens 		}
21701544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
21711544Seschrock 		arc_access_and_exit(hdr, hash_lock);
21721544Seschrock 	} else if (acb->acb_done == NULL) {
21731544Seschrock 		int destroy_hdr;
21741544Seschrock 		/*
21751544Seschrock 		 * This is an anonymous buffer with no user callback,
21761544Seschrock 		 * destroy it if there are no active references.
21771544Seschrock 		 */
21781544Seschrock 		mutex_enter(&arc_eviction_mtx);
21791544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
21801544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
21811544Seschrock 		mutex_exit(&arc_eviction_mtx);
21821544Seschrock 		if (destroy_hdr)
21831544Seschrock 			arc_hdr_destroy(hdr);
21841544Seschrock 	} else {
21851544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2186789Sahrens 	}
21871544Seschrock 
21881544Seschrock 	if (acb->acb_done) {
2189789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2190789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2191789Sahrens 	}
2192789Sahrens 
21931544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2194789Sahrens }
2195789Sahrens 
2196789Sahrens int
21971775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2198789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2199789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
22001544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2201789Sahrens {
2202789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2203789Sahrens 	arc_callback_t	*acb;
2204789Sahrens 	zio_t	*rzio;
2205789Sahrens 
2206789Sahrens 	/* this is a private buffer - no locking required */
2207789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2208789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2209789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
2210789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2211789Sahrens 	acb->acb_done = done;
2212789Sahrens 	acb->acb_private = private;
2213789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2214789Sahrens 	hdr->b_acb = acb;
22151544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
22161775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
22171544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2218789Sahrens 
2219789Sahrens 	if (arc_flags & ARC_WAIT)
2220789Sahrens 		return (zio_wait(rzio));
2221789Sahrens 
2222789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2223789Sahrens 	zio_nowait(rzio);
2224789Sahrens 
2225789Sahrens 	return (0);
2226789Sahrens }
2227789Sahrens 
2228789Sahrens int
2229789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2230789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2231789Sahrens {
2232789Sahrens 	arc_buf_hdr_t *ab;
2233789Sahrens 	kmutex_t *hash_lock;
2234789Sahrens 	zio_t	*zio;
2235789Sahrens 
2236789Sahrens 	/*
2237789Sahrens 	 * If this buffer is in the cache, release it, so it
2238789Sahrens 	 * can be re-used.
2239789Sahrens 	 */
2240789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2241789Sahrens 	if (ab != NULL) {
2242789Sahrens 		/*
2243789Sahrens 		 * The checksum of blocks to free is not always
2244789Sahrens 		 * preserved (e.g. on the deadlist).  However, if it is
2245789Sahrens 		 * nonzero, it should match what we have in the cache.
2246789Sahrens 		 */
2247789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2248789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
2249789Sahrens 		arc_change_state(arc.anon, ab, hash_lock);
2250789Sahrens 		if (refcount_is_zero(&ab->b_refcnt)) {
2251789Sahrens 			mutex_exit(hash_lock);
22521544Seschrock 			arc_hdr_destroy(ab);
2253789Sahrens 			atomic_add_64(&arc.deleted, 1);
2254789Sahrens 		} else {
22551589Smaybee 			/*
22561589Smaybee 			 * We could have an outstanding read on this
22571589Smaybee 			 * block, so multiple active references are
22581589Smaybee 			 * possible.  But we should only have a single
22591589Smaybee 			 * data buffer associated at this point.
22601589Smaybee 			 */
22611544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 1);
2262789Sahrens 			if (HDR_IO_IN_PROGRESS(ab))
2263789Sahrens 				ab->b_flags |= ARC_FREED_IN_READ;
22641544Seschrock 			if (HDR_IN_HASH_TABLE(ab))
22651544Seschrock 				buf_hash_remove(ab);
2266789Sahrens 			ab->b_arc_access = 0;
2267789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2268789Sahrens 			ab->b_birth = 0;
2269789Sahrens 			ab->b_cksum0 = 0;
22701544Seschrock 			ab->b_buf->b_efunc = NULL;
22711544Seschrock 			ab->b_buf->b_private = NULL;
2272789Sahrens 			mutex_exit(hash_lock);
2273789Sahrens 		}
2274789Sahrens 	}
2275789Sahrens 
2276789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2277789Sahrens 
2278789Sahrens 	if (arc_flags & ARC_WAIT)
2279789Sahrens 		return (zio_wait(zio));
2280789Sahrens 
2281789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2282789Sahrens 	zio_nowait(zio);
2283789Sahrens 
2284789Sahrens 	return (0);
2285789Sahrens }
2286789Sahrens 
2287789Sahrens void
2288789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2289789Sahrens {
2290789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2291789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2292789Sahrens }
2293789Sahrens 
2294789Sahrens int
2295789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2296789Sahrens {
2297789Sahrens #ifdef ZFS_DEBUG
2298789Sahrens 	/*
2299789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2300789Sahrens 	 */
2301789Sahrens 	if (spa_get_random(10000) == 0) {
2302789Sahrens 		dprintf("forcing random failure\n");
2303789Sahrens 		return (ERESTART);
2304789Sahrens 	}
2305789Sahrens #endif
2306982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
2307982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
2308982Smaybee 	if (tempreserve > arc.c)
2309982Smaybee 		return (ENOMEM);
2310982Smaybee 
2311789Sahrens 	/*
2312982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2313982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2314982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2315982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2316982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2317982Smaybee 	 *
2318982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2319982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
2320789Sahrens 	 */
2321789Sahrens 
2322982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2323982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2324789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2325789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
2326789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2327789Sahrens 		    tempreserve>>10, arc.c>>10);
2328789Sahrens 		return (ERESTART);
2329789Sahrens 	}
2330789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2331789Sahrens 	return (0);
2332789Sahrens }
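
/*
 * Worked example of the throttle above (added for exposition, with made-up
 * numbers): with arc.c = 400MB, arc_tempreserve = 60MB and 50MB of
 * anonymous data, a 100MB reservation sees 100 + 60 + 50 = 210MB > c/2 and
 * 60 + 50 = 110MB > c/4, so it fails with ERESTART; a 20MB reservation in
 * the same state (130MB <= c/2) is granted.
 */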
2333789Sahrens 
2334789Sahrens void
2335789Sahrens arc_init(void)
2336789Sahrens {
2337789Sahrens 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
2338789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2339789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2340789Sahrens 
2341789Sahrens 	/* Start out with 1/8 of all memory */
2342789Sahrens 	arc.c = physmem * PAGESIZE / 8;
2343789Sahrens 
2344789Sahrens #ifdef _KERNEL
2345789Sahrens 	/*
2346789Sahrens 	 * On architectures where the physical memory can be larger
2347789Sahrens 	 * than the addressable space (Intel in 32-bit mode), we may
2348789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2349789Sahrens 	 */
2350789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2351789Sahrens #endif
2352789Sahrens 
2353982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2354789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
2355982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2356789Sahrens 	if (arc.c * 8 >= 1<<30)
2357789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
2358789Sahrens 	else
2359789Sahrens 		arc.c_max = arc.c_min;
2360789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
2361789Sahrens 	arc.c = arc.c_max;
2362789Sahrens 	arc.p = (arc.c >> 1);
2363789Sahrens 
2364789Sahrens 	/* if kmem_flags are set, lets try to use less memory */
2365789Sahrens 	if (kmem_debugging())
2366789Sahrens 		arc.c = arc.c / 2;
2367789Sahrens 	if (arc.c < arc.c_min)
2368789Sahrens 		arc.c = arc.c_min;
2369789Sahrens 
2370789Sahrens 	arc.anon = &ARC_anon;
23711544Seschrock 	arc.mru = &ARC_mru;
23721544Seschrock 	arc.mru_ghost = &ARC_mru_ghost;
23731544Seschrock 	arc.mfu = &ARC_mfu;
23741544Seschrock 	arc.mfu_ghost = &ARC_mfu_ghost;
23751544Seschrock 	arc.size = 0;
2376789Sahrens 
23771544Seschrock 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2378789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
23791544Seschrock 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2380789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
23811544Seschrock 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2382789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
23831544Seschrock 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2384789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2385789Sahrens 
2386789Sahrens 	buf_init();
2387789Sahrens 
2388789Sahrens 	arc_thread_exit = 0;
23891544Seschrock 	arc_eviction_list = NULL;
23901544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
2391789Sahrens 
2392789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2393789Sahrens 	    TS_RUN, minclsyspri);
2394789Sahrens }
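
/*
 * Worked example of the sizing above (added for exposition): on a 64-bit
 * machine with 8GB of physical memory, arc.c starts at 1GB, arc.c_min
 * becomes MAX(256MB, 64MB) = 256MB, arc.c_max becomes
 * MAX(6GB, 8GB - 1GB) = 7GB, and arc.c and arc.p are then set to 7GB and
 * 3.5GB respectively (arc.c is halved if kmem debugging is enabled).
 */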
2395789Sahrens 
2396789Sahrens void
2397789Sahrens arc_fini(void)
2398789Sahrens {
2399789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2400789Sahrens 	arc_thread_exit = 1;
2401789Sahrens 	while (arc_thread_exit != 0)
2402789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2403789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2404789Sahrens 
2405789Sahrens 	arc_flush();
2406789Sahrens 
2407789Sahrens 	arc_dead = TRUE;
2408789Sahrens 
24091544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2410789Sahrens 	mutex_destroy(&arc_reclaim_lock);
2411789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2412789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2413789Sahrens 
24141544Seschrock 	list_destroy(&arc.mru->list);
24151544Seschrock 	list_destroy(&arc.mru_ghost->list);
24161544Seschrock 	list_destroy(&arc.mfu->list);
24171544Seschrock 	list_destroy(&arc.mfu_ghost->list);
2418789Sahrens 
2419789Sahrens 	buf_fini();
2420789Sahrens }
2421