xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 982)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation and algorithms used here
 * are based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times
 * we implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock; a sketch of this
 * pattern follows below.  Also note that the "top" state mutex
 * must be held before the "bot" state mutex.
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
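
/*
 * Illustrative sketch (not part of the build): code that already
 * holds an arc list lock probes the hash lock with mutex_tryenter()
 * and skips the buffer on failure rather than blocking, which could
 * deadlock against a thread taking the locks in the opposite order:
 *
 *	mutex_enter(&state->mtx);		(arc list lock)
 *	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
 *		ab_prev = list_prev(&state->list, ab);
 *		if (!mutex_tryenter(HDR_LOCK(ab)))
 *			continue;		(skip; never block here)
 *		... operate on ab ...
 *		mutex_exit(HDR_LOCK(ab));
 *	}
 *	mutex_exit(&state->mtx);
 *
 * arc_evict_state() and arc_delete_state() below use this pattern.
 */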

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

static kmutex_t arc_reclaim_lock;
static int arc_dead;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru_top	- recently used, currently cached
 *	ARC_mru_bot	- recently used, no longer in cache
 *	ARC_mfu_top	- frequently used, currently cached
 *	ARC_mfu_bot	- frequently used, no longer in cache
 * When there are no active references to a buffer, it is linked
 * onto one of the lists in arc.  These are the only buffers that
 * can be evicted or deleted.  A sketch of the typical lifecycle
 * follows below.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru_top
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru_top list.
 */
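
/*
 * Illustrative sketch (not part of the build) of a typical buffer
 * lifecycle, as implemented by arc_access() below; exact transitions
 * depend on access timing and the ARC_PREFETCH flag:
 *
 *	arc_buf_alloc()			-> ARC_anon
 *	first read / hash insert	-> ARC_mru_top
 *	re-access after ARC_MINTIME	-> ARC_mfu_top
 *	evicted from ARC_mru_top	-> ARC_mru_bot (ghost)
 *	evicted from ARC_mfu_top	-> ARC_mfu_bot (ghost)
 *	hit on a ghost list		-> ARC_mfu_top (or ARC_mru_top
 *					   for a prefetch)
 */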

typedef struct arc_state {
	list_t	list;	/* linked list of evictable buffers in state */
	uint64_t lsize;	/* total size of buffers in the linked list */
	uint64_t size;	/* total size of all buffers in this state */
	uint64_t hits;
	kmutex_t mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru_top;
static arc_state_t ARC_mru_bot;
static arc_state_t ARC_mfu_top;
static arc_state_t ARC_mfu_bot;

static struct arc {
	arc_state_t 	*anon;
	arc_state_t	*mru_top;
	arc_state_t	*mru_bot;
	arc_state_t	*mfu_top;
	arc_state_t	*mfu_bot;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru_top */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */
	uint64_t	incr;		/* Size by which to increment arc.c */
	int64_t		size_check;

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	skipped;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

/* Default amount by which to grow arc.incr */
static int64_t arc_incr_size = 1024;

/* arc.size_check > 0 ==> time to increment arc.c */
static int64_t arc_size_check_default = -1000;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

/*
 * Private ARC flags.  These flags are private, ARC-only flags that will show
 * up in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and
 * can be passed in as arc_flags in things like arc_read.  The private flags,
 * however, should never be passed in and should only be set by ARC code.
 * When adding new public flags, make sure not to smash the private ones.
 */

#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */

#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to the one being inserted, the existing element will be returned
 * and the new element will not be inserted.  Otherwise returns NULL.
 * In either case, the (held) hash lock is returned via *lockp.
 */
static arc_buf_hdr_t *fbufs[4]; /* XXX to find 6341326 */
static kthread_t *fbufs_lastthread;
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t max, i;

	fbufs_lastthread = curthread;
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (i < sizeof (fbufs) / sizeof (fbufs[0]))
			fbufs[i] = fbuf;
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;

	/* collect some hash table performance data */
	if (i > 0) {
		atomic_add_64(&arc.hash_collisions, 1);
		if (i == 1)
			atomic_add_64(&arc.hash_chains, 1);
	}
	while (i > (max = arc.hash_chain_max) &&
	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
		continue;
	}
	atomic_add_64(&arc.hash_elements, 1);
	if (arc.hash_elements > arc.hash_elements_max)
		atomic_add_64(&arc.hash_elements_max, 1);

	return (NULL);
}
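
/*
 * Illustrative sketch (not part of the build): a caller such as the
 * arc_read() miss path uses the insert interface roughly like this
 * (the hash lock is held on return in both the hit and miss cases):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... lost the race: another thread inserted an equal
 *		... header first, so use "exists" and discard "hdr"
 *	} else {
 *		... "hdr" is now in the table
 *	}
 *	mutex_exit(hash_lock);
 */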

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;

	/* collect some hash table performance data */
	atomic_add_64(&arc.hash_elements, -1);
	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		atomic_add_64(&arc.hash_chains, -1);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

void arc_kmem_reclaim(void);

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	arc_kmem_reclaim();
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 10;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4k block size.  The table will take up
	 * totalmem*sizeof(void*)/4k bytes (e.g. 2MB/GB with 8-byte
	 * pointers).
	 */
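	/*
	 * Worked example (assuming 1GB of physical memory and 4KB
	 * pages): physmem * PAGESIZE == 2^30, so the loop below stops
	 * at hsize == 2^18 (256K buckets), and the table consumes
	 * 2^18 * 8 bytes == 2MB -- the 2MB/GB figure quoted above.
	 */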
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table = kmem_zalloc(hsize * sizeof (void*), KM_SLEEP);

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

#define	ARC_TAG		(void *)0x05201962

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!refcount_is_zero(&ab->b_refcnt));
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->list, ab);
		ASSERT3U(ab->b_state->lsize, >=, ab->b_size);
		ab->b_state->lsize -= ab->b_size;
		mutex_exit(&ab->b_state->mtx);
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;

	ASSERT(MUTEX_HELD(hash_lock));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&ab->b_state->list, ab);
		ASSERT(ab->b_buf != NULL);
		ab->b_state->lsize += ab->b_size;
		mutex_exit(&ab->b_state->mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab,
    kmutex_t *hash_lock)
{
	arc_buf_t *buf;

	ASSERT(MUTEX_HELD(hash_lock));

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcount_is_zero(&ab->b_refcnt)) {
		if (ab->b_state != arc.anon) {
			int drop_mutex = FALSE;

			if (!MUTEX_HELD(&ab->b_state->mtx)) {
				mutex_enter(&ab->b_state->mtx);
				drop_mutex = TRUE;
			}
			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&ab->b_state->list, ab);
			ASSERT3U(ab->b_state->lsize, >=, ab->b_size);
			ab->b_state->lsize -= ab->b_size;
			if (drop_mutex)
				mutex_exit(&ab->b_state->mtx);
		}
		if (new_state != arc.anon) {
			int drop_mutex = FALSE;

			if (!MUTEX_HELD(&new_state->mtx)) {
				mutex_enter(&new_state->mtx);
				drop_mutex = TRUE;
			}
			list_insert_head(&new_state->list, ab);
			ASSERT(ab->b_buf != NULL);
			new_state->lsize += ab->b_size;
			if (drop_mutex)
				mutex_exit(&new_state->mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc.anon && ab->b_state != arc.anon) {
		buf_hash_remove(ab);
	}

	/*
	 * If this buffer isn't being transferred to the MRU-top or
	 * MRU-bot state, it's safe to clear its prefetch flag.
	 */
	if ((new_state != arc.mru_top) && (new_state != arc.mru_bot)) {
		ab->b_flags &= ~ARC_PREFETCH;
	}

	buf = ab->b_buf;
	if (buf == NULL) {
		ASSERT3U(ab->b_state->size, >=, ab->b_size);
		atomic_add_64(&ab->b_state->size, -ab->b_size);
		/* we should only be here if we are deleting state */
		ASSERT(new_state == arc.anon &&
		    (ab->b_state == arc.mru_bot || ab->b_state == arc.mfu_bot));
	} else while (buf) {
		ASSERT3U(ab->b_state->size, >=, ab->b_size);
		atomic_add_64(&ab->b_state->size, -ab->b_size);
		atomic_add_64(&new_state->size, ab->b_size);
		buf = buf->b_next;
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_spa = spa;
	hdr->b_state = arc.anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_next = NULL;
	buf->b_data = zio_buf_alloc(size);
	hdr->b_buf = buf;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	atomic_add_64(&arc.size, size);
	atomic_add_64(&arc.anon->size, size);

	return (buf);
}
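
/*
 * Illustrative sketch (not part of the build): allocation and free
 * are tag-based, and the same tag must be used for both calls so the
 * refcount bookkeeping balances.  A hypothetical caller:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, SPA_MINBLOCKSIZE, FTAG);
 *	bcopy(src, buf->b_data, arc_buf_size(buf));
 *	...
 *	arc_buf_free(buf, FTAG);
 */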

static void
arc_hdr_free(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc.anon);

	if (!BUF_EMPTY(hdr)) {
		/*
		 * We can be called with an arc state lock held,
		 * so we can't hold a hash lock here.
		 * ASSERT(not in hash table)
		 */
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	if (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT3U(hdr->b_size, >, 0);
		zio_buf_free(buf->b_data, hdr->b_size);
		atomic_add_64(&arc.size, -hdr->b_size);
		ASSERT3U(arc.anon->size, >=, hdr->b_size);
		atomic_add_64(&arc.anon->size, -hdr->b_size);
		ASSERT3P(buf->b_next, ==, NULL);
		kmem_cache_free(buf_cache, buf);
		hdr->b_buf = NULL;
	}
	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int freeable;

	mutex_enter(hash_lock);
	if (remove_reference(hdr, hash_lock, tag) > 0) {
		arc_buf_t **bufp = &hdr->b_buf;
		arc_state_t *state = hdr->b_state;
		uint64_t size = hdr->b_size;

		ASSERT(hdr->b_state != arc.anon || HDR_IO_ERROR(hdr));
		while (*bufp != buf) {
			ASSERT(*bufp);
			bufp = &(*bufp)->b_next;
		}
		*bufp = buf->b_next;
		mutex_exit(hash_lock);
		zio_buf_free(buf->b_data, size);
		atomic_add_64(&arc.size, -size);
		kmem_cache_free(buf_cache, buf);
		ASSERT3U(state->size, >=, size);
		atomic_add_64(&state->size, -size);
		return;
	}

	/* don't free buffers that are in the middle of an async write */
	freeable = (hdr->b_state == arc.anon && hdr->b_acb == NULL);
	mutex_exit(hash_lock);

	if (freeable)
		arc_hdr_free(hdr);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 */
static uint64_t
arc_evict_state(arc_state_t *state, int64_t bytes)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;

	ASSERT(state == arc.mru_top || state == arc.mfu_top);

	if (state == arc.mru_top)
		evicted_state = arc.mru_bot;
	else
		evicted_state = arc.mfu_bot;

	mutex_enter(&state->mtx);
	mutex_enter(&evicted_state->mtx);

	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			arc_change_state(evicted_state, ab, hash_lock);
			zio_buf_free(ab->b_buf->b_data, ab->b_size);
			atomic_add_64(&arc.size, -ab->b_size);
			ASSERT3P(ab->b_buf->b_next, ==, NULL);
			kmem_cache_free(buf_cache, ab->b_buf);
			ab->b_buf = NULL;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			bytes_evicted += ab->b_size;
			mutex_exit(hash_lock);
			if (bytes_evicted >= bytes)
				break;
		} else {
			atomic_add_64(&arc.skipped, 1);
		}
	}
	mutex_exit(&evicted_state->mtx);
	mutex_exit(&state->mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	return (bytes_evicted);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_delete_state(arc_state_t *state, int64_t bytes)
{
	uint_t bufs_skipped = 0;
	uint64_t bytes_deleted = 0;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;

top:
	mutex_enter(&state->mtx);
	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			arc_change_state(arc.anon, ab, hash_lock);
			mutex_exit(hash_lock);
			atomic_add_64(&arc.deleted, 1);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			bytes_deleted += ab->b_size;
			arc_hdr_free(ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->mtx);

	if (bufs_skipped) {
		atomic_add_64(&arc.skipped, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over;

	top_sz = arc.anon->size + arc.mru_top->size;

	if (top_sz > arc.p && arc.mru_top->lsize > 0) {
		int64_t toevict = MIN(arc.mru_top->lsize, top_sz-arc.p);
		(void) arc_evict_state(arc.mru_top, toevict);
		top_sz = arc.anon->size + arc.mru_top->size;
	}

	mru_over = top_sz + arc.mru_bot->size - arc.c;

	if (mru_over > 0) {
		if (arc.mru_bot->lsize > 0) {
			int64_t todelete = MIN(arc.mru_bot->lsize, mru_over);
			arc_delete_state(arc.mru_bot, todelete);
		}
	}

	if ((arc_over = arc.size - arc.c) > 0) {
		int64_t table_over;

		if (arc.mfu_top->lsize > 0) {
			int64_t toevict = MIN(arc.mfu_top->lsize, arc_over);
			(void) arc_evict_state(arc.mfu_top, toevict);
		}

		table_over = arc.size + arc.mru_bot->lsize + arc.mfu_bot->lsize
		    - arc.c*2;

		if (table_over > 0 && arc.mfu_bot->lsize > 0) {
			int64_t todelete = MIN(arc.mfu_bot->lsize, table_over);
			arc_delete_state(arc.mfu_bot, todelete);
		}
	}
}
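
/*
 * In effect, arc_adjust() above enforces these targets (sketch):
 *
 *	anon + mru_top			<= p
 *	anon + mru_top + mru_bot	<= c
 *	size (all resident data)	<= c
 *	size + mru_bot + mfu_bot	<= 2c
 *
 * evicting resident buffers onto the ghost lists for the first and
 * third, and deleting ghost-list headers for the second and fourth.
 */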

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	arc_delete_state(arc.mru_top, -1);
	arc_delete_state(arc.mfu_top, -1);

	arc_delete_state(arc.mru_bot, -1);
	arc_delete_state(arc.mfu_bot, -1);
}

void
arc_kmem_reclaim(void)
{
	/* Remove 6.25% (i.e. arc.c >> 4) from the target cache size. */
	/*
	 * We need arc_reclaim_lock because we don't want multiple
	 * threads trying to reclaim concurrently.
	 */

	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().  So we set a flag to prevent
	 * accessing the destroyed mutexes and lists.
	 */
	if (arc_dead)
		return;

	mutex_enter(&arc_reclaim_lock);

	atomic_add_64(&arc.c, -(arc.c >> 4));
	if (arc.c < arc.c_min)
		arc.c = arc.c_min;
	atomic_add_64(&arc.p, -(arc.p >> 4));

	arc_adjust();

	/* Cool it for a while */
	arc.incr = 0;
	arc.size_check = arc_size_check_default << 3;

	mutex_exit(&arc_reclaim_lock);
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL
	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, plus the number of
	 * reserved swap pages.  We also add a bit of extra here just to
	 * prevent circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
#if defined(__i386)
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];

	/*
	 * an aggressive reclamation will shrink the cache size as well as
	 * reap free kmem buffers.  The arc_kmem_reclaim function is called
	 * when the header-cache is reaped, so we only reap the header cache
	 * if we're performing an aggressive reclaim.  If we're not, just
	 * clean the kmem buffer caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		kmem_cache_reap_now(hdr_cache);

	kmem_cache_reap_now(buf_cache);

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
	}
}

static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc.no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc.no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = lbolt + (arc_grow_retry * hz);

			arc_kmem_reap_now(last_reclaim);

		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
			arc.no_grow = FALSE;
		}

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (lbolt + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

static void
arc_try_grow(int64_t bytes)
{
	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	atomic_add_64((uint64_t *)&arc.size_check, 1);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc.no_grow)
		return;

	/*
	 * We bump the target cache size if we're within range of it,
	 * or if the cache has already outgrown it.  Above, we bail out
	 * without growing when a reclaim is needed or in progress.
	 */
	if ((arc.c - arc.size) <= (2ULL << SPA_MAXBLOCKSHIFT)) {
		if (arc.size_check > 0) {
			arc.size_check = arc_size_check_default;
			atomic_add_64(&arc.incr, arc_incr_size);
		}
		atomic_add_64(&arc.c, MIN(bytes, arc.incr));
		if (arc.c > arc.c_max)
			arc.c = arc.c_max;
		else
			atomic_add_64(&arc.p, MIN(bytes, arc.incr));
	} else if (arc.size > arc.c) {
		if (arc.size_check > 0) {
			arc.size_check = arc_size_check_default;
			atomic_add_64(&arc.incr, arc_incr_size);
		}
		atomic_add_64(&arc.c, MIN(bytes, arc.incr));
		if (arc.c > arc.c_max)
			arc.c = arc.c_max;
		else
			atomic_add_64(&arc.p, MIN(bytes, arc.incr));
	}
}

/*
 * Check if the cache has reached its limits and eviction is required prior
 * to insert.  In this situation, we want to evict if no_grow is set.
 * Otherwise, the cache is either big enough that we can insert, or an
 * arc_try_grow() call will result in more space being made available.
 */

static int
arc_evict_needed()
{

	if (arc_reclaim_needed())
		return (1);

	if (arc.no_grow || (arc.c > arc.c_max) || (arc.size > arc.c))
		return (1);

	return (0);
}

/*
 * The state, supplied as the first argument, is going to have something
 * inserted on its behalf. So, determine which cache must be victimized to
 * satisfy an insertion for this state.  We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru_top) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru_top) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc.mfu_top) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc.mfu_top) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
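/*
 * Worked example (hypothetical numbers): with c == 100MB and p == 60MB,
 * an insert for the MRU side when anon + mru_top already hold 70MB falls
 * under case 2 (p <= resident MRU), so we evict from mru_top itself.
 * An insert for the MFU side when mfu_top holds 30MB has
 * mfu_space == c - p == 40MB > 30MB, which is case 3, so mru_top is
 * victimized instead.
 */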
static void
arc_evict_for_state(arc_state_t *state, uint64_t bytes)
{
	uint64_t	mru_used;
	uint64_t	mfu_space;
	uint64_t	evicted;

	ASSERT(state == arc.mru_top || state == arc.mfu_top);

	if (state == arc.mru_top) {
		mru_used = arc.anon->size + arc.mru_top->size;
		if (arc.p > mru_used) {
			/* case 1 */
			evicted = arc_evict_state(arc.mfu_top, bytes);
			if (evicted < bytes) {
				arc_adjust();
			}
		} else {
			/* case 2 */
			evicted = arc_evict_state(arc.mru_top, bytes);
			if (evicted < bytes) {
				arc_adjust();
			}
		}
	} else {
		/* MFU_top case */
		mfu_space = arc.c - arc.p;
		if (mfu_space > arc.mfu_top->size) {
			/* case 3 */
			evicted = arc_evict_state(arc.mru_top, bytes);
			if (evicted < bytes) {
				arc_adjust();
			}
		} else {
			/* case 4 */
			evicted = arc_evict_state(arc.mfu_top, bytes);
			if (evicted < bytes) {
				arc_adjust();
			}
		}
	}
}

/*
 * This routine is called whenever a buffer is accessed.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	int		blksz, mult;

	ASSERT(MUTEX_HELD(hash_lock));

	blksz = buf->b_size;

	if (buf->b_state == arc.anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		arc_try_grow(blksz);
		if (arc_evict_needed()) {
			arc_evict_for_state(arc.mru_top, blksz);
		}

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mru_top, arc_buf_hdr_t *,
		    buf);
		arc_change_state(arc.mru_top, buf, hash_lock);

		/*
		 * If we are using less than 2/3 of our total target
		 * cache size, bump up the target size for the MRU
		 * list.
		 */
		if (arc.size < arc.c*2/3) {
			arc.p = arc.anon->size + arc.mru_top->size + arc.c/6;
		}

	} else if (buf->b_state == arc.mru_top) {
		/*
		 * If this buffer is in the MRU-top state and has the prefetch
		 * flag, the first read was actually part of a prefetch.  In
		 * this situation, we simply want to clear the flag and return.
		 * A subsequent access should bump this into the MFU state.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			buf->b_flags &= ~ARC_PREFETCH;
			atomic_add_64(&arc.mru_top->hits, 1);
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than ARC_MINTIME (about 62ms) has passed
			 * since we instantiated this buffer.  Move it to
			 * the most frequently used state.
			 */
			buf->b_arc_access = lbolt;
			DTRACE_PROBE1(new_state__mfu_top,
			    arc_buf_hdr_t *, buf);
			arc_change_state(arc.mfu_top, buf, hash_lock);
		}
		atomic_add_64(&arc.mru_top->hits, 1);
	} else if (buf->b_state == arc.mru_bot) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc.mru_top;
			DTRACE_PROBE1(new_state__mru_top,
			    arc_buf_hdr_t *, buf);
		} else {
			new_state = arc.mfu_top;
			DTRACE_PROBE1(new_state__mfu_top,
			    arc_buf_hdr_t *, buf);
		}

		arc_try_grow(blksz);
		if (arc_evict_needed()) {
			arc_evict_for_state(new_state, blksz);
		}

		/* Bump up the target size of the MRU list */
		mult = ((arc.mru_bot->size >= arc.mfu_bot->size) ?
		    1 : (arc.mfu_bot->size/arc.mru_bot->size));
		arc.p = MIN(arc.c, arc.p + blksz * mult);

		buf->b_arc_access = lbolt;
		arc_change_state(new_state, buf, hash_lock);

		atomic_add_64(&arc.mru_bot->hits, 1);
	} else if (buf->b_state == arc.mfu_top) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: the add_reference() that occurred when we did
		 * the arc_read() should have kicked this off the list,
		 * so even if it was a prefetch, it will be put back at
		 * the head of the list when we remove_reference().
		 */
		atomic_add_64(&arc.mfu_top->hits, 1);
	} else if (buf->b_state == arc.mfu_bot) {
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		arc_try_grow(blksz);
		if (arc_evict_needed()) {
			arc_evict_for_state(arc.mfu_top, blksz);
		}

		/* Bump up the target size for the MFU list */
		mult = ((arc.mfu_bot->size >= arc.mru_bot->size) ?
		    1 : (arc.mru_bot->size/arc.mfu_bot->size));
		arc.p = MAX(0, (int64_t)arc.p - blksz * mult);

		buf->b_arc_access = lbolt;
		DTRACE_PROBE1(new_state__mfu_top,
		    arc_buf_hdr_t *, buf);
		arc_change_state(arc.mfu_top, buf, hash_lock);

		atomic_add_64(&arc.mfu_bot->hits, 1);
	} else {
		ASSERT(!"invalid arc state");
	}
}

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	arc_buf_free(buf, arg);
}

/* a generic arc_done_func_t which you can use */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		arc_buf_free(buf, arg);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}
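
/*
 * Illustrative sketch (not part of the build): a hypothetical caller
 * passes arc_getbuf_func as the "done" callback and &buf as the
 * "private" argument when issuing a read (arc_read() signature
 * elided); once the read has completed:
 *
 *	arc_buf_t *buf = NULL;
 *	... issue the read with arc_getbuf_func / &buf ...
 *	if (buf == NULL)
 *		... the I/O failed and the buffer was freed ...
 *	else
 *		... buf->b_data holds the block contents ...
 */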
1336789Sahrens 
1337789Sahrens static void
1338789Sahrens arc_read_done(zio_t *zio)
1339789Sahrens {
1340789Sahrens 	arc_buf_hdr_t	*hdr;
1341789Sahrens 	arc_buf_t	*buf;
1342789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1343789Sahrens 	kmutex_t	*hash_lock;
1344789Sahrens 	arc_callback_t	*callback_list, *acb;
1345789Sahrens 	int		freeable = FALSE;
1346789Sahrens 
1347789Sahrens 	buf = zio->io_private;
1348789Sahrens 	hdr = buf->b_hdr;
1349789Sahrens 
1350789Sahrens 	if (!HDR_FREED_IN_READ(hdr)) {
1351789Sahrens 		arc_buf_hdr_t *found;
1352789Sahrens 
1353789Sahrens 		found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
1354789Sahrens 		    &hash_lock);
1355789Sahrens 
1356789Sahrens 		/*
1357789Sahrens 		 * Buffer was inserted into hash-table and removed from lists
1358789Sahrens 		 * prior to starting I/O.  We should find this header, since
1359789Sahrens 		 * it's in the hash table, and it should be legit since it's
1360789Sahrens 		 * not possible to evict it during the I/O.
1361789Sahrens 		 */
1362789Sahrens 
1363789Sahrens 		ASSERT(found);
1364789Sahrens 		ASSERT(DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)));
1365789Sahrens 	}
1366789Sahrens 
1367789Sahrens 	/* byteswap if necessary */
1368789Sahrens 	callback_list = hdr->b_acb;
1369789Sahrens 	ASSERT(callback_list != NULL);
1370789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1371789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1372789Sahrens 
1373789Sahrens 	/*
	 * Create copies of the data buffer for the callers.  The first
	 * caller with a done func gets the original buffer; any
	 * additional callers each get their own copy of the data.
	 */
1374789Sahrens 	abuf = buf;
1375789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1376789Sahrens 		if (acb->acb_done) {
1377789Sahrens 			if (abuf == NULL) {
1378789Sahrens 				abuf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1379789Sahrens 				abuf->b_data = zio_buf_alloc(hdr->b_size);
1380789Sahrens 				atomic_add_64(&arc.size, hdr->b_size);
1381789Sahrens 				bcopy(buf->b_data, abuf->b_data, hdr->b_size);
1382789Sahrens 				abuf->b_hdr = hdr;
1383789Sahrens 				abuf->b_next = hdr->b_buf;
1384789Sahrens 				hdr->b_buf = abuf;
1385789Sahrens 				atomic_add_64(&hdr->b_state->size, hdr->b_size);
1386789Sahrens 			}
1387789Sahrens 			acb->acb_buf = abuf;
1388789Sahrens 			abuf = NULL;
1389789Sahrens 		} else {
1390789Sahrens 			/*
1391789Sahrens 			 * The caller did not provide a callback function.
1392789Sahrens 			 * In this case, we should just remove the reference.
1393789Sahrens 			 */
1394789Sahrens 			if (HDR_FREED_IN_READ(hdr)) {
1395789Sahrens 				ASSERT3P(hdr->b_state, ==, arc.anon);
1396789Sahrens 				(void) refcount_remove(&hdr->b_refcnt,
1397789Sahrens 				    acb->acb_private);
1398789Sahrens 			} else {
1399789Sahrens 				(void) remove_reference(hdr, hash_lock,
1400789Sahrens 				    acb->acb_private);
1401789Sahrens 			}
1402789Sahrens 		}
1403789Sahrens 	}
1404789Sahrens 	hdr->b_acb = NULL;
1405789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
1406789Sahrens 
1407789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1408789Sahrens 
1409789Sahrens 	if (zio->io_error != 0) {
1410789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1411789Sahrens 		if (hdr->b_state != arc.anon)
1412789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
1413789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1414789Sahrens 	}
1415789Sahrens 
1416789Sahrens 	if (!HDR_FREED_IN_READ(hdr)) {
1417789Sahrens 		/*
1418789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1419789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1420789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1421789Sahrens 		 * getting confused).
1422789Sahrens 		 */
1423789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
1424789Sahrens 			arc_access(hdr, hash_lock);
1425789Sahrens 		mutex_exit(hash_lock);
1426789Sahrens 	} else {
1427789Sahrens 		/*
1428789Sahrens 		 * This block was freed while we waited for the read to
1429789Sahrens 		 * complete.  It has been removed from the hash table and
1430789Sahrens 		 * moved to the anonymous state (so that it won't show up
1431789Sahrens 		 * in the cache).
1432789Sahrens 		 */
1433789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1434789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1435789Sahrens 	}
1436789Sahrens 
1437789Sahrens 	cv_broadcast(&hdr->b_cv);
1438789Sahrens 
1439789Sahrens 	/* execute each callback and free its structure */
1440789Sahrens 	while ((acb = callback_list) != NULL) {
1441789Sahrens 		if (acb->acb_done)
1442789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1443789Sahrens 
1444789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1445789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1446789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1447789Sahrens 		}
1448789Sahrens 
1449789Sahrens 		callback_list = acb->acb_next;
1450789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1451789Sahrens 	}
1452789Sahrens 
1453789Sahrens 	if (freeable)
1454789Sahrens 		arc_hdr_free(hdr);
1455789Sahrens }
1456789Sahrens 
1457789Sahrens /*
1458789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1459789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1460789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1461789Sahrens  * in the callback will be NULL in this case, since no I/O was
1462789Sahrens  * required.  If the block is not in the cache, pass the read request
1463789Sahrens  * on to the spa with a substitute callback function, so that the
1464789Sahrens  * requested block will be added to the cache.
1465789Sahrens  *
1466789Sahrens  * If a read request arrives for a block that has a read in-progress,
1467789Sahrens  * either wait for the in-progress read to complete (and return the
1468789Sahrens  * results); or, if this is a read with a "done" func, add a record
1469789Sahrens  * to the read to invoke the "done" func when the read completes,
1470789Sahrens  * and return; or just return.
1471789Sahrens  *
1472789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1473789Sahrens  * for readers of this block.
1474789Sahrens  */
1475789Sahrens int
1476789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1477789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
1478789Sahrens     uint32_t arc_flags)
1479789Sahrens {
1480789Sahrens 	arc_buf_hdr_t *hdr;
1481789Sahrens 	arc_buf_t *buf;
1482789Sahrens 	kmutex_t *hash_lock;
1483789Sahrens 	zio_t	*rzio;
1484789Sahrens 
1485789Sahrens top:
1486789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
1487789Sahrens 	if (hdr && hdr->b_buf) {
1488789Sahrens 
1489789Sahrens 		ASSERT((hdr->b_state == arc.mru_top) ||
1490789Sahrens 		    (hdr->b_state == arc.mfu_top) ||
1491789Sahrens 		    ((hdr->b_state == arc.anon) &&
1492789Sahrens 		    (HDR_IO_IN_PROGRESS(hdr))));
1493789Sahrens 
1494789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
1495789Sahrens 
1496789Sahrens 			if ((arc_flags & ARC_NOWAIT) && done) {
1497789Sahrens 				arc_callback_t	*acb = NULL;
1498789Sahrens 
1499789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1500789Sahrens 				    KM_SLEEP);
1501789Sahrens 				acb->acb_done = done;
1502789Sahrens 				acb->acb_private = private;
1503789Sahrens 				acb->acb_byteswap = swap;
1504789Sahrens 				if (pio != NULL)
1505789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1506789Sahrens 					    spa, NULL, NULL, flags);
1507789Sahrens 
1508789Sahrens 				ASSERT(acb->acb_done != NULL);
1509789Sahrens 				acb->acb_next = hdr->b_acb;
1510789Sahrens 				hdr->b_acb = acb;
1511789Sahrens 				add_reference(hdr, hash_lock, private);
1512789Sahrens 				mutex_exit(hash_lock);
1513789Sahrens 				return (0);
1514789Sahrens 			} else if (arc_flags & ARC_WAIT) {
1515789Sahrens 				cv_wait(&hdr->b_cv, hash_lock);
1516789Sahrens 				mutex_exit(hash_lock);
1517789Sahrens 				goto top;
1518789Sahrens 			}
1519789Sahrens 
1520789Sahrens 			mutex_exit(hash_lock);
1521789Sahrens 			return (0);
1522789Sahrens 		}
1523789Sahrens 
1524789Sahrens 		/*
1525789Sahrens 		 * If there is already a reference on this block, create
1526789Sahrens 		 * a new copy of the data so that we will be guaranteed
1527789Sahrens 		 * that arc_release() will always succeed.
1528789Sahrens 		 */
1529789Sahrens 
1530789Sahrens 		if (done)
1531789Sahrens 			add_reference(hdr, hash_lock, private);
1532789Sahrens 		if (done && refcount_count(&hdr->b_refcnt) > 1) {
1533789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1534789Sahrens 			buf->b_data = zio_buf_alloc(hdr->b_size);
1535789Sahrens 			ASSERT3U(refcount_count(&hdr->b_refcnt), >, 1);
1536789Sahrens 			atomic_add_64(&arc.size, hdr->b_size);
1537789Sahrens 			bcopy(hdr->b_buf->b_data, buf->b_data, hdr->b_size);
1538789Sahrens 			buf->b_hdr = hdr;
1539789Sahrens 			buf->b_next = hdr->b_buf;
1540789Sahrens 			hdr->b_buf = buf;
1541789Sahrens 			atomic_add_64(&hdr->b_state->size, hdr->b_size);
1542789Sahrens 		} else {
1543789Sahrens 			buf = hdr->b_buf;
1544789Sahrens 		}
1545789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1546789Sahrens 		arc_access(hdr, hash_lock);
1547789Sahrens 		mutex_exit(hash_lock);
1548789Sahrens 		atomic_add_64(&arc.hits, 1);
1549789Sahrens 		if (done)
1550789Sahrens 			done(NULL, buf, private);
1551789Sahrens 	} else {
1552789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1553789Sahrens 		arc_callback_t	*acb;
1554789Sahrens 
1555789Sahrens 		if (hdr == NULL) {
1556789Sahrens 			/* this block is not in the cache */
1557789Sahrens 			arc_buf_hdr_t	*exists;
1558789Sahrens 
1559789Sahrens 			buf = arc_buf_alloc(spa, size, private);
1560789Sahrens 			hdr = buf->b_hdr;
1561789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1562789Sahrens 			hdr->b_birth = bp->blk_birth;
1563789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1564789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1565789Sahrens 			if (exists) {
1566789Sahrens 				/* somebody beat us to the hash insert */
1567789Sahrens 				mutex_exit(hash_lock);
1568789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1569789Sahrens 				hdr->b_birth = 0;
1570789Sahrens 				hdr->b_cksum0 = 0;
1571789Sahrens 				arc_buf_free(buf, private);
1572789Sahrens 				goto top; /* restart the IO request */
1573789Sahrens 			}
1574789Sahrens 
1575789Sahrens 		} else {
1576789Sahrens 			/* this block is in the ghost cache */
1577789Sahrens 			ASSERT((hdr->b_state == arc.mru_bot) ||
1578789Sahrens 			    (hdr->b_state == arc.mfu_bot));
1579789Sahrens 			add_reference(hdr, hash_lock, private);
1580789Sahrens 
1581789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1582789Sahrens 			buf->b_data = zio_buf_alloc(hdr->b_size);
1583789Sahrens 			atomic_add_64(&arc.size, hdr->b_size);
1584789Sahrens 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1585789Sahrens 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
1586789Sahrens 			buf->b_hdr = hdr;
1587789Sahrens 			buf->b_next = NULL;
1588789Sahrens 			hdr->b_buf = buf;
1589789Sahrens 		}
1590789Sahrens 
1591789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1592789Sahrens 		acb->acb_done = done;
1593789Sahrens 		acb->acb_private = private;
1594789Sahrens 		acb->acb_byteswap = swap;
1595789Sahrens 
1596789Sahrens 		ASSERT(hdr->b_acb == NULL);
1597789Sahrens 		hdr->b_acb = acb;
1598789Sahrens 
1599789Sahrens 		/*
1600789Sahrens 		 * If this DVA is part of a prefetch, mark the buf
1601789Sahrens 		 * header with the prefetch flag
1602789Sahrens 		 */
1603789Sahrens 		if (arc_flags & ARC_PREFETCH)
1604789Sahrens 			hdr->b_flags |= ARC_PREFETCH;
1605789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1606789Sahrens 
1607789Sahrens 		/*
1608789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
1609789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
1610789Sahrens 		 * the header will be marked as I/O in progress and have an
1611789Sahrens 		 * attached buffer.  At this point, anybody who finds this
1612789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
1613789Sahrens 		 */
1614789Sahrens 
1615789Sahrens 		if ((hdr->b_state == arc.mru_bot) ||
1616789Sahrens 		    (hdr->b_state == arc.mfu_bot))
1617789Sahrens 			arc_access(hdr, hash_lock);
1618789Sahrens 
1619789Sahrens 		mutex_exit(hash_lock);
1620789Sahrens 
1621789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
1622789Sahrens 		DTRACE_PROBE2(arc__miss, blkptr_t *, bp,
1623789Sahrens 		    uint64_t, size);
1624789Sahrens 		atomic_add_64(&arc.misses, 1);
1625789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
1626789Sahrens 		    arc_read_done, buf, priority, flags);
1627789Sahrens 
1628789Sahrens 		if (arc_flags & ARC_WAIT)
1629789Sahrens 			return (zio_wait(rzio));
1630789Sahrens 
1631789Sahrens 		ASSERT(arc_flags & ARC_NOWAIT);
1632789Sahrens 		zio_nowait(rzio);
1633789Sahrens 	}
1634789Sahrens 	return (0);
1635789Sahrens }
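
/*
 * Illustrative sketch (not part of this file): an asynchronous,
 * fire-and-forget read of the kind a prefetch might issue.  The
 * priority and flags values, and the NULL byteswap func, are
 * placeholder assumptions.
 */
#ifdef ARC_EXAMPLES	/* never defined; sketch only */
static void
example_prefetch_block(spa_t *spa, blkptr_t *bp)
{
	/* no done func: on completion the reference is simply dropped */
	(void) arc_read(NULL, spa, bp, NULL, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
	    ARC_NOWAIT | ARC_PREFETCH);
}
#endif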
1636789Sahrens 
1637789Sahrens /*
1638789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
1639789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
1640789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
1641789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
1642789Sahrens  */
1643789Sahrens int
1644789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
1645789Sahrens {
1646789Sahrens 	arc_buf_hdr_t *hdr;
1647789Sahrens 	kmutex_t *hash_mtx;
1648789Sahrens 	int rc = 0;
1649789Sahrens 
1650789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
1651789Sahrens 
1652789Sahrens 	if (hdr && hdr->b_buf && !HDR_IO_IN_PROGRESS(hdr))
1653789Sahrens 		bcopy(hdr->b_buf->b_data, data, hdr->b_size);
1654789Sahrens 	else
1655789Sahrens 		rc = ENOENT;
1656789Sahrens 
1657789Sahrens 	if (hash_mtx)
1658789Sahrens 		mutex_exit(hash_mtx);
1659789Sahrens 
1660789Sahrens 	return (rc);
1661789Sahrens }
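
/*
 * Illustrative sketch (not part of this file): how a traversal callback
 * might try the cache first and fall back to its own I/O.  The
 * traverse_read_block() fallback is hypothetical.
 */
#ifdef ARC_EXAMPLES	/* never defined; sketch only */
static int
example_traverse_read(spa_t *spa, blkptr_t *bp, void *data)
{
	if (arc_tryread(spa, bp, data) == 0)
		return (0);	/* satisfied from the cache */
	return (traverse_read_block(spa, bp, data));	/* hypothetical */
}
#endif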
1662789Sahrens 
1663789Sahrens /*
1664789Sahrens  * Release this buffer from the cache.  This must be done
1665789Sahrens  * after a read and prior to modifying the buffer contents.
1666789Sahrens  * If the buffer has more than one reference, we must
1667789Sahrens  * make a new hdr for the buffer.
1668789Sahrens  */
1669789Sahrens void
1670789Sahrens arc_release(arc_buf_t *buf, void *tag)
1671789Sahrens {
1672789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
1673789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
1674789Sahrens 
1675789Sahrens 	/* this buffer is not on any list */
1676789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
1677789Sahrens 
1678789Sahrens 	if (hdr->b_state == arc.anon) {
1679789Sahrens 		/* this buffer is already released */
1680789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
1681789Sahrens 		ASSERT(BUF_EMPTY(hdr));
1682789Sahrens 		return;
1683789Sahrens 	}
1684789Sahrens 
1685789Sahrens 	mutex_enter(hash_lock);
1686789Sahrens 
1687789Sahrens 	if (refcount_count(&hdr->b_refcnt) > 1) {
1688789Sahrens 		arc_buf_hdr_t *nhdr;
1689789Sahrens 		arc_buf_t **bufp;
1690789Sahrens 		uint64_t blksz = hdr->b_size;
1691789Sahrens 		spa_t *spa = hdr->b_spa;
1692789Sahrens 
1693789Sahrens 		/*
1694789Sahrens 		 * Pull the data off of this buf and attach it to
1695789Sahrens 		 * a new anonymous buf.
1696789Sahrens 		 */
1697789Sahrens 		bufp = &hdr->b_buf;
1698789Sahrens 		while (*bufp != buf) {
1699789Sahrens 			ASSERT(*bufp);
1700789Sahrens 			bufp = &(*bufp)->b_next;
1701789Sahrens 		}
1702789Sahrens 		*bufp = (*bufp)->b_next;
1703789Sahrens 		(void) refcount_remove(&hdr->b_refcnt, tag);
1704789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
1705789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
1706789Sahrens 		mutex_exit(hash_lock);
1707789Sahrens 
1708789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
1709789Sahrens 		nhdr->b_size = blksz;
1710789Sahrens 		nhdr->b_spa = spa;
1711789Sahrens 		nhdr->b_buf = buf;
1712789Sahrens 		nhdr->b_state = arc.anon;
1713789Sahrens 		nhdr->b_arc_access = 0;
1714789Sahrens 		nhdr->b_flags = 0;
1715789Sahrens 		buf->b_hdr = nhdr;
1716789Sahrens 		buf->b_next = NULL;
1717789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
1718789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
1719789Sahrens 
1720789Sahrens 		hdr = nhdr;
1721789Sahrens 	} else {
1722789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
1723789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1724789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
1725789Sahrens 		hdr->b_arc_access = 0;
1726789Sahrens 		mutex_exit(hash_lock);
1727789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
1728789Sahrens 		hdr->b_birth = 0;
1729789Sahrens 		hdr->b_cksum0 = 0;
1730789Sahrens 	}
1731789Sahrens }
1732789Sahrens 
1733789Sahrens int
1734789Sahrens arc_released(arc_buf_t *buf)
1735789Sahrens {
1736789Sahrens 	return (buf->b_hdr->b_state == arc.anon);
1737789Sahrens }
1738789Sahrens 
1739789Sahrens static void
1740789Sahrens arc_write_done(zio_t *zio)
1741789Sahrens {
1742789Sahrens 	arc_buf_t *buf;
1743789Sahrens 	arc_buf_hdr_t *hdr;
1744789Sahrens 	arc_callback_t *acb;
1745789Sahrens 
1746789Sahrens 	buf = zio->io_private;
1747789Sahrens 	hdr = buf->b_hdr;
1748789Sahrens 	acb = hdr->b_acb;
1749789Sahrens 	hdr->b_acb = NULL;
1750789Sahrens 
1751789Sahrens 	/* this buffer is on no lists and is not in the hash table */
1752789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
1753789Sahrens 
1754789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
1755789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
1756789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
1757789Sahrens 	/* clear the "in-write" flag */
1758789Sahrens 	hdr->b_hash_next = NULL;
1759789Sahrens 	/* This write may be all-zero */
1760789Sahrens 	if (!BUF_EMPTY(hdr)) {
1761789Sahrens 		arc_buf_hdr_t *exists;
1762789Sahrens 		kmutex_t *hash_lock;
1763789Sahrens 
1764789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
1765789Sahrens 		if (exists) {
1766789Sahrens 			/*
1767789Sahrens 			 * This can only happen if we overwrite for
1768789Sahrens 			 * sync-to-convergence, because we remove
1769789Sahrens 			 * buffers from the hash table when we arc_free().
1770789Sahrens 			 */
1771789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
1772789Sahrens 			    BP_IDENTITY(zio->io_bp)));
1773789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
1774789Sahrens 			    zio->io_bp->blk_birth);
1775789Sahrens 
1776789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
1777789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
1778789Sahrens 			mutex_exit(hash_lock);
1779789Sahrens 			arc_hdr_free(exists);
1780789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1781789Sahrens 			ASSERT3P(exists, ==, NULL);
1782789Sahrens 		}
1783789Sahrens 		arc_access(hdr, hash_lock);
1784789Sahrens 		mutex_exit(hash_lock);
1785789Sahrens 	}
1786789Sahrens 	if (acb && acb->acb_done) {
1787789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
1788789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
1789789Sahrens 	}
1790789Sahrens 
1791789Sahrens 	if (acb)
1792789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1793789Sahrens }
1794789Sahrens 
1795789Sahrens int
1796789Sahrens arc_write(zio_t *pio, spa_t *spa, int checksum, int compress,
1797789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
1798789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
1799789Sahrens     uint32_t arc_flags)
1800789Sahrens {
1801789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
1802789Sahrens 	arc_callback_t	*acb;
1803789Sahrens 	zio_t	*rzio;
1804789Sahrens 
1805789Sahrens 	/* this is a private buffer - no locking required */
1806789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
1807789Sahrens 	ASSERT(BUF_EMPTY(hdr));
1808789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
1809789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1810789Sahrens 	acb->acb_done = done;
1811789Sahrens 	acb->acb_private = private;
1812789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
1813789Sahrens 	hdr->b_acb = acb;
1814789Sahrens 	rzio = zio_write(pio, spa, checksum, compress, txg, bp,
1815789Sahrens 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags);
1816789Sahrens 
1817789Sahrens 	if (arc_flags & ARC_WAIT)
1818789Sahrens 		return (zio_wait(rzio));
1819789Sahrens 
1820789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
1821789Sahrens 	zio_nowait(rzio);
1822789Sahrens 
1823789Sahrens 	return (0);
1824789Sahrens }
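
/*
 * Illustrative sketch (not part of this file): the expected buffer life
 * cycle around arc_write().  A buffer obtained from arc_read() must be
 * arc_release()d before its contents are modified.  The checksum,
 * compress, priority and flags values are placeholder assumptions.
 */
#ifdef ARC_EXAMPLES	/* never defined; sketch only */
static int
example_modify_and_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, void *tag)
{
	if (!arc_released(buf))
		arc_release(buf, tag);	/* detach from the shared hdr */

	/* ... modify buf->b_data here ... */

	return (arc_write(pio, spa, ZIO_CHECKSUM_FLETCHER_2,
	    ZIO_COMPRESS_OFF, txg, bp, buf, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, ARC_WAIT));
}
#endif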
1825789Sahrens 
1826789Sahrens int
1827789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1828789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
1829789Sahrens {
1830789Sahrens 	arc_buf_hdr_t *ab;
1831789Sahrens 	kmutex_t *hash_lock;
1832789Sahrens 	zio_t	*zio;
1833789Sahrens 
1834789Sahrens 	/*
1835789Sahrens 	 * If this buffer is in the cache, release it, so it
1836789Sahrens 	 * can be re-used.
1837789Sahrens 	 */
1838789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
1839789Sahrens 	if (ab != NULL) {
1840789Sahrens 		/*
1841789Sahrens 		 * The checksum of blocks to free is not always
1842789Sahrens 	 * preserved (e.g. on the deadlist).  However, if it is
1843789Sahrens 		 * nonzero, it should match what we have in the cache.
1844789Sahrens 		 */
1845789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
1846789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
1847789Sahrens 		arc_change_state(arc.anon, ab, hash_lock);
1848789Sahrens 		if (refcount_is_zero(&ab->b_refcnt)) {
1849789Sahrens 			mutex_exit(hash_lock);
1850789Sahrens 			arc_hdr_free(ab);
1851789Sahrens 			atomic_add_64(&arc.deleted, 1);
1852789Sahrens 		} else {
1853789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 1);
1854789Sahrens 			if (HDR_IO_IN_PROGRESS(ab))
1855789Sahrens 				ab->b_flags |= ARC_FREED_IN_READ;
1856789Sahrens 			ab->b_arc_access = 0;
1857789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
1858789Sahrens 			ab->b_birth = 0;
1859789Sahrens 			ab->b_cksum0 = 0;
1860789Sahrens 			mutex_exit(hash_lock);
1861789Sahrens 		}
1862789Sahrens 	}
1863789Sahrens 
1864789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
1865789Sahrens 
1866789Sahrens 	if (arc_flags & ARC_WAIT)
1867789Sahrens 		return (zio_wait(zio));
1868789Sahrens 
1869789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
1870789Sahrens 	zio_nowait(zio);
1871789Sahrens 
1872789Sahrens 	return (0);
1873789Sahrens }
1874789Sahrens 
1875789Sahrens void
1876789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
1877789Sahrens {
1878789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
1879789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
1880789Sahrens }
1881789Sahrens 
1882789Sahrens int
1883789Sahrens arc_tempreserve_space(uint64_t tempreserve)
1884789Sahrens {
1885789Sahrens #ifdef ZFS_DEBUG
1886789Sahrens 	/*
1887789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
1888789Sahrens 	 */
1889789Sahrens 	if (spa_get_random(10000) == 0) {
1890789Sahrens 		dprintf("forcing random failure\n");
1891789Sahrens 		return (ERESTART);
1892789Sahrens 	}
1893789Sahrens #endif
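	/*
	 * If the reservation would exceed a quarter of the current cache
	 * size, try to grow the cache to keep it under that bound; a
	 * reservation larger than the whole cache cannot be satisfied.
	 */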
1894*982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
1895*982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
1896*982Smaybee 	if (tempreserve > arc.c)
1897*982Smaybee 		return (ENOMEM);
1898*982Smaybee 
1899789Sahrens 	/*
1900*982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
1901*982Smaybee 	 * gets too large.  We try to keep the cache less than half full
1902*982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
1903*982Smaybee 	 * Note: if two requests come in concurrently, we might let them
1904*982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
1905*982Smaybee 	 *
1906*982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
1907*982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
1908789Sahrens 	 */
1909789Sahrens 
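	/*
	 * Worked example (numbers are illustrative): with arc.c = 1GB, a
	 * new reservation fails with ERESTART once the proposed total
	 * (tempreserve + arc_tempreserve + arc.anon->size) would exceed
	 * 512MB while already-dirty data alone exceeds 256MB.
	 */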
1910*982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
1911*982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
1912789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
1913789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
1914789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
1915789Sahrens 		    tempreserve>>10, arc.c>>10);
1916789Sahrens 		return (ERESTART);
1917789Sahrens 	}
1918789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
1919789Sahrens 	return (0);
1920789Sahrens }
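
/*
 * Illustrative sketch (not part of this file): pairing a reservation
 * with its clear.  Real callers respond to ERESTART by waiting for a
 * txg to sync; the delay() back-off here is a placeholder.
 */
#ifdef ARC_EXAMPLES	/* never defined; sketch only */
static int
example_reserve_and_dirty(uint64_t nbytes)
{
	int err;

	while ((err = arc_tempreserve_space(nbytes)) == ERESTART)
		delay(1);	/* placeholder back-off */
	if (err != 0)
		return (err);	/* e.g. ENOMEM: reservation too large */

	/* ... dirty up to nbytes of anonymous buffers ... */

	arc_tempreserve_clear(nbytes);
	return (0);
}
#endif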
1921789Sahrens 
1922789Sahrens void
1923789Sahrens arc_init(void)
1924789Sahrens {
1925789Sahrens 	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
1926789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
1927789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
1928789Sahrens 
1929789Sahrens 	/* Start out with 1/8 of all memory */
1930789Sahrens 	arc.c = physmem * PAGESIZE / 8;
1931789Sahrens 
1932789Sahrens #ifdef _KERNEL
1933789Sahrens 	/*
1934789Sahrens 	 * On architectures where the physical memory can be larger
1935789Sahrens 	 * than the addressable space (intel in 32-bit mode), we may
1936789Sahrens 	 * than the addressable space (Intel in 32-bit mode), we may
1937789Sahrens 	 */
1938789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
1939789Sahrens #endif
1940789Sahrens 
1941*982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
1942789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
1943*982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
1944789Sahrens 	if (arc.c * 8 >= 1<<30)
1945789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
1946789Sahrens 	else
1947789Sahrens 		arc.c_max = arc.c_min;
1948789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
1949789Sahrens 	arc.c = arc.c_max;
1950789Sahrens 	arc.p = (arc.c >> 1);
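
	/*
	 * Worked example (numbers are illustrative): with 4GB of physical
	 * memory, arc.c starts at 512MB; c_min = MAX(128MB, 64MB) = 128MB;
	 * c_max = MAX(6 * 512MB, 4GB - 1GB) = 3GB; the cache then starts
	 * at c_max = 3GB with arc.p = 1.5GB.
	 */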
1951789Sahrens 
1952789Sahrens 	/* if kmem_flags are set, let's try to use less memory */
1953789Sahrens 	if (kmem_debugging())
1954789Sahrens 		arc.c = arc.c / 2;
1955789Sahrens 	if (arc.c < arc.c_min)
1956789Sahrens 		arc.c = arc.c_min;
1957789Sahrens 
1958789Sahrens 	arc.anon = &ARC_anon;
1959789Sahrens 	arc.mru_top = &ARC_mru_top;
1960789Sahrens 	arc.mru_bot = &ARC_mru_bot;
1961789Sahrens 	arc.mfu_top = &ARC_mfu_top;
1962789Sahrens 	arc.mfu_bot = &ARC_mfu_bot;
1963789Sahrens 
1964789Sahrens 	list_create(&arc.mru_top->list, sizeof (arc_buf_hdr_t),
1965789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
1966789Sahrens 	list_create(&arc.mru_bot->list, sizeof (arc_buf_hdr_t),
1967789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
1968789Sahrens 	list_create(&arc.mfu_top->list, sizeof (arc_buf_hdr_t),
1969789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
1970789Sahrens 	list_create(&arc.mfu_bot->list, sizeof (arc_buf_hdr_t),
1971789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
1972789Sahrens 
1973789Sahrens 	buf_init();
1974789Sahrens 
1975789Sahrens 	arc_thread_exit = 0;
1976789Sahrens 
1977789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
1978789Sahrens 	    TS_RUN, minclsyspri);
1979789Sahrens }
1980789Sahrens 
1981789Sahrens void
1982789Sahrens arc_fini(void)
1983789Sahrens {
1984789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1985789Sahrens 	arc_thread_exit = 1;
1986789Sahrens 	while (arc_thread_exit != 0)
1987789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
1988789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
1989789Sahrens 
1990789Sahrens 	arc_flush();
1991789Sahrens 
1992789Sahrens 	arc_dead = TRUE;
1993789Sahrens 
1994789Sahrens 	mutex_destroy(&arc_reclaim_lock);
1995789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
1996789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
1997789Sahrens 
1998789Sahrens 	list_destroy(&arc.mru_top->list);
1999789Sahrens 	list_destroy(&arc.mru_bot->list);
2000789Sahrens 	list_destroy(&arc.mfu_top->list);
2001789Sahrens 	list_destroy(&arc.mfu_bot->list);
2002789Sahrens 
2003789Sahrens 	buf_fini();
2004789Sahrens }