xref: /dflybsd-src/sys/vfs/hammer/hammer_flusher.c (revision edcd6db23946027d6cec4d781a950acec4f7dd88)
1059819e3SMatthew Dillon /*
2059819e3SMatthew Dillon  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3059819e3SMatthew Dillon  *
4059819e3SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
5059819e3SMatthew Dillon  * by Matthew Dillon <dillon@backplane.com>
6059819e3SMatthew Dillon  *
7059819e3SMatthew Dillon  * Redistribution and use in source and binary forms, with or without
8059819e3SMatthew Dillon  * modification, are permitted provided that the following conditions
9059819e3SMatthew Dillon  * are met:
10059819e3SMatthew Dillon  *
11059819e3SMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
12059819e3SMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
13059819e3SMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
14059819e3SMatthew Dillon  *    notice, this list of conditions and the following disclaimer in
15059819e3SMatthew Dillon  *    the documentation and/or other materials provided with the
16059819e3SMatthew Dillon  *    distribution.
17059819e3SMatthew Dillon  * 3. Neither the name of The DragonFly Project nor the names of its
18059819e3SMatthew Dillon  *    contributors may be used to endorse or promote products derived
19059819e3SMatthew Dillon  *    from this software without specific, prior written permission.
20059819e3SMatthew Dillon  *
21059819e3SMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22059819e3SMatthew Dillon  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23059819e3SMatthew Dillon  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24059819e3SMatthew Dillon  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25059819e3SMatthew Dillon  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26059819e3SMatthew Dillon  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27059819e3SMatthew Dillon  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28059819e3SMatthew Dillon  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29059819e3SMatthew Dillon  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30059819e3SMatthew Dillon  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31059819e3SMatthew Dillon  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32059819e3SMatthew Dillon  * SUCH DAMAGE.
33059819e3SMatthew Dillon  *
344889cbd4SMatthew Dillon  * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
35059819e3SMatthew Dillon  */
36059819e3SMatthew Dillon /*
37059819e3SMatthew Dillon  * HAMMER dependency flusher thread
38059819e3SMatthew Dillon  *
39059819e3SMatthew Dillon  * Meta-data updates create buffer dependencies which are arranged as a
40059819e3SMatthew Dillon  * hierarchy of lists.
41059819e3SMatthew Dillon  */
42059819e3SMatthew Dillon 
43059819e3SMatthew Dillon #include "hammer.h"
44059819e3SMatthew Dillon 
45da2da375SMatthew Dillon static void hammer_flusher_master_thread(void *arg);
46da2da375SMatthew Dillon static void hammer_flusher_slave_thread(void *arg);
47e86903d8SMatthew Dillon static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
48e2a02b72SMatthew Dillon static int hammer_flusher_flush_inode(hammer_inode_t ip, void *data);
49c9b9e29dSMatthew Dillon 
50ff003b11SMatthew Dillon RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
51ff003b11SMatthew Dillon               hammer_ino_rb_compare);
52ff003b11SMatthew Dillon 
53ff003b11SMatthew Dillon /*
54af209b0fSMatthew Dillon  * Support structures for the flusher threads.
55af209b0fSMatthew Dillon  */
5622a0040dSTomohiro Kusumi typedef struct hammer_flusher_info {
577a61b85dSMatthew Dillon 	TAILQ_ENTRY(hammer_flusher_info) entry;
58ba2be8e9STomohiro Kusumi 	hammer_mount_t	hmp;
59af209b0fSMatthew Dillon 	thread_t	td;
607a61b85dSMatthew Dillon 	int		runstate;
617a61b85dSMatthew Dillon 	hammer_flush_group_t flg;
62e2a02b72SMatthew Dillon 	struct hammer_transaction trans;        /* per-slave transaction */
6322a0040dSTomohiro Kusumi } *hammer_flusher_info_t;
64059819e3SMatthew Dillon 
657bc5b8c2SMatthew Dillon /*
667a61b85dSMatthew Dillon  * Sync all inodes pending on the flusher.
677a61b85dSMatthew Dillon  *
687a61b85dSMatthew Dillon  * All flush groups will be flushed.  This does not queue dirty inodes
697a61b85dSMatthew Dillon  * to the flush groups, it just flushes out what has already been queued!
707bc5b8c2SMatthew Dillon  */
71059819e3SMatthew Dillon void
72059819e3SMatthew Dillon hammer_flusher_sync(hammer_mount_t hmp)
73059819e3SMatthew Dillon {
74059819e3SMatthew Dillon 	int seq;
75059819e3SMatthew Dillon 
767a61b85dSMatthew Dillon 	seq = hammer_flusher_async(hmp, NULL);
77f437a2abSMatthew Dillon 	hammer_flusher_wait(hmp, seq);
78059819e3SMatthew Dillon }
79059819e3SMatthew Dillon 
807bc5b8c2SMatthew Dillon /*
8137646115SMatthew Dillon  * Sync all flush groups through to close_flg - return immediately.
8237646115SMatthew Dillon  * If close_flg is NULL all flush groups are synced.
837a61b85dSMatthew Dillon  *
8437646115SMatthew Dillon  * Returns the sequence number of the last closed flush group,
8537646115SMatthew Dillon  * which may be close_flg.  When syncing to the end, if there
86e86903d8SMatthew Dillon  * are no flush groups pending we still cycle the flusher, and
87e86903d8SMatthew Dillon  * must allocate a sequence number to placemark the spot even
88e86903d8SMatthew Dillon  * though no flush group will ever be associated with it.
897bc5b8c2SMatthew Dillon  */
9093291532SMatthew Dillon int
917a61b85dSMatthew Dillon hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
92059819e3SMatthew Dillon {
937a61b85dSMatthew Dillon 	hammer_flush_group_t flg;
9437646115SMatthew Dillon 	int seq;
9593291532SMatthew Dillon 
9637646115SMatthew Dillon 	/*
9737646115SMatthew Dillon 	 * Already closed
9837646115SMatthew Dillon 	 */
9937646115SMatthew Dillon 	if (close_flg && close_flg->closed)
10037646115SMatthew Dillon 		return(close_flg->seq);
10137646115SMatthew Dillon 
10237646115SMatthew Dillon 	/*
10337646115SMatthew Dillon 	 * Close flush groups until we hit the end of the list
10437646115SMatthew Dillon 	 * or close_flg.
10537646115SMatthew Dillon 	 */
10637646115SMatthew Dillon 	while ((flg = hmp->next_flush_group) != NULL) {
10737646115SMatthew Dillon 		KKASSERT(flg->closed == 0 && flg->running == 0);
1087a61b85dSMatthew Dillon 		flg->closed = 1;
10937646115SMatthew Dillon 		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1107a61b85dSMatthew Dillon 		if (flg == close_flg)
1117a61b85dSMatthew Dillon 			break;
1127a61b85dSMatthew Dillon 	}
11337646115SMatthew Dillon 
114da2da375SMatthew Dillon 	if (hmp->flusher.td) {
115da2da375SMatthew Dillon 		if (hmp->flusher.signal++ == 0)
116da2da375SMatthew Dillon 			wakeup(&hmp->flusher.signal);
117e86903d8SMatthew Dillon 		if (flg) {
118e86903d8SMatthew Dillon 			seq = flg->seq;
119e86903d8SMatthew Dillon 		} else {
120e86903d8SMatthew Dillon 			seq = hmp->flusher.next;
121e86903d8SMatthew Dillon 			++hmp->flusher.next;
122e86903d8SMatthew Dillon 		}
12393291532SMatthew Dillon 	} else {
12493291532SMatthew Dillon 		seq = hmp->flusher.done;
1251f07f686SMatthew Dillon 	}
12693291532SMatthew Dillon 	return(seq);
12793291532SMatthew Dillon }
12893291532SMatthew Dillon 
12937646115SMatthew Dillon /*
13037646115SMatthew Dillon  * Flush the current/next flushable flg.  This function is typically called
13137646115SMatthew Dillon  * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
13237646115SMatthew Dillon  * flush groups until specific conditions are met.
13337646115SMatthew Dillon  *
13437646115SMatthew Dillon  * If a flush is currently in progress its seq is returned.
13537646115SMatthew Dillon  *
13637646115SMatthew Dillon  * If no flush is currently in progress the next available flush group
13737646115SMatthew Dillon  * will be flushed and its seq returned.
13837646115SMatthew Dillon  *
13937646115SMatthew Dillon  * If no flush groups are present a dummy seq will be allocated and
14037646115SMatthew Dillon  * returned and the flusher will be activated (e.g. to flush the
14137646115SMatthew Dillon  * undo/redo and the volume header).
14237646115SMatthew Dillon  */
14315e75dabSMatthew Dillon int
14415e75dabSMatthew Dillon hammer_flusher_async_one(hammer_mount_t hmp)
14515e75dabSMatthew Dillon {
14637646115SMatthew Dillon 	hammer_flush_group_t flg;
14715e75dabSMatthew Dillon 	int seq;
14815e75dabSMatthew Dillon 
14915e75dabSMatthew Dillon 	if (hmp->flusher.td) {
15037646115SMatthew Dillon 		flg = TAILQ_FIRST(&hmp->flush_group_list);
15137646115SMatthew Dillon 		seq = hammer_flusher_async(hmp, flg);
15215e75dabSMatthew Dillon 	} else {
15315e75dabSMatthew Dillon 		seq = hmp->flusher.done;
15415e75dabSMatthew Dillon 	}
15515e75dabSMatthew Dillon 	return(seq);
15615e75dabSMatthew Dillon }
15715e75dabSMatthew Dillon 
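/*
 * A minimal usage sketch (not from the original file), illustrating the
 * iterate-and-wait pattern described in the comment above
 * hammer_flusher_async_one(): drain queued flush groups one sequence at
 * a time.  The stop condition is only an example;
 * hammer_flusher_wait_next() below performs one such iteration.
 *
 *	int seq;
 *
 *	while (hammer_flusher_haswork(hmp)) {
 *		seq = hammer_flusher_async_one(hmp);
 *		hammer_flusher_wait(hmp, seq);
 *	}
 */
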
158f437a2abSMatthew Dillon /*
159e86903d8SMatthew Dillon  * Wait for the flusher to finish flushing the specified sequence
160e86903d8SMatthew Dillon  * number.  The flush is already running and will signal us on
161e86903d8SMatthew Dillon  * each completion.
162f437a2abSMatthew Dillon  */
16393291532SMatthew Dillon void
16493291532SMatthew Dillon hammer_flusher_wait(hammer_mount_t hmp, int seq)
16593291532SMatthew Dillon {
166460925a6STomohiro Kusumi 	while (seq - hmp->flusher.done > 0)
16793291532SMatthew Dillon 		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
168059819e3SMatthew Dillon }
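
/*
 * Note on the comparison above (editorial, assuming 32-bit int sequence
 * numbers): (seq - hmp->flusher.done > 0) uses subtraction rather than a
 * direct (seq > done) compare so the ordering survives wraparound of the
 * sequence space.  For example, done = 0x7fffffff and seq = 0x80000001
 * still compare as "seq is ahead by 2", whereas a direct signed compare
 * would order the two values the other way around.
 */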
169059819e3SMatthew Dillon 
170e2a02b72SMatthew Dillon /*
171e2a02b72SMatthew Dillon  * Returns non-zero if the flusher is currently running.  Used for
172e2a02b72SMatthew Dillon  * time-domain multiplexing of frontend operations in order to avoid
173e2a02b72SMatthew Dillon  * starving the backend flusher.
174e2a02b72SMatthew Dillon  */
175e2a02b72SMatthew Dillon int
176e2a02b72SMatthew Dillon hammer_flusher_running(hammer_mount_t hmp)
177e2a02b72SMatthew Dillon {
178e2a02b72SMatthew Dillon 	int seq = hmp->flusher.next - 1;
179460925a6STomohiro Kusumi 	if (seq - hmp->flusher.done > 0)
180e2a02b72SMatthew Dillon 		return(1);
181e2a02b72SMatthew Dillon 	return (0);
182e2a02b72SMatthew Dillon }
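
/*
 * Hypothetical frontend-side sketch (not code from this file): a write
 * path could use this test to back off while the backend is flushing,
 * e.g.
 *
 *	if (hammer_flusher_running(hmp))
 *		tsleep(&hmp->flusher.done, 0, "hmrbak", hz / 10);
 *
 * The wait channel and timeout here are illustrative only.
 */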
183e2a02b72SMatthew Dillon 
184059819e3SMatthew Dillon void
18582010f9fSMatthew Dillon hammer_flusher_wait_next(hammer_mount_t hmp)
18682010f9fSMatthew Dillon {
18782010f9fSMatthew Dillon 	int seq;
18882010f9fSMatthew Dillon 
18982010f9fSMatthew Dillon 	seq = hammer_flusher_async_one(hmp);
19082010f9fSMatthew Dillon 	hammer_flusher_wait(hmp, seq);
19182010f9fSMatthew Dillon }
19282010f9fSMatthew Dillon 
19382010f9fSMatthew Dillon void
194059819e3SMatthew Dillon hammer_flusher_create(hammer_mount_t hmp)
195059819e3SMatthew Dillon {
196da2da375SMatthew Dillon 	hammer_flusher_info_t info;
197da2da375SMatthew Dillon 	int i;
198da2da375SMatthew Dillon 
199da2da375SMatthew Dillon 	hmp->flusher.signal = 0;
200da2da375SMatthew Dillon 	hmp->flusher.done = 0;
201da2da375SMatthew Dillon 	hmp->flusher.next = 1;
202da2da375SMatthew Dillon 	hammer_ref(&hmp->flusher.finalize_lock);
2037a61b85dSMatthew Dillon 	TAILQ_INIT(&hmp->flusher.run_list);
2047a61b85dSMatthew Dillon 	TAILQ_INIT(&hmp->flusher.ready_list);
205da2da375SMatthew Dillon 
206da2da375SMatthew Dillon 	lwkt_create(hammer_flusher_master_thread, hmp,
2073038a8caSMatthew Dillon 		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
208da2da375SMatthew Dillon 	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
209bac808feSMatthew Dillon 		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
210da2da375SMatthew Dillon 		info->hmp = hmp;
2117a61b85dSMatthew Dillon 		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
212da2da375SMatthew Dillon 		lwkt_create(hammer_flusher_slave_thread, info,
2133038a8caSMatthew Dillon 			    &info->td, NULL, 0, -1, "hammer-S%d", i);
214da2da375SMatthew Dillon 	}
215059819e3SMatthew Dillon }
216059819e3SMatthew Dillon 
217059819e3SMatthew Dillon void
218059819e3SMatthew Dillon hammer_flusher_destroy(hammer_mount_t hmp)
219059819e3SMatthew Dillon {
220da2da375SMatthew Dillon 	hammer_flusher_info_t info;
221da2da375SMatthew Dillon 
222da2da375SMatthew Dillon 	/*
223da2da375SMatthew Dillon 	 * Kill the master
224da2da375SMatthew Dillon 	 */
225da2da375SMatthew Dillon 	hmp->flusher.exiting = 1;
226da2da375SMatthew Dillon 	while (hmp->flusher.td) {
227da2da375SMatthew Dillon 		++hmp->flusher.signal;
228da2da375SMatthew Dillon 		wakeup(&hmp->flusher.signal);
229da2da375SMatthew Dillon 		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
230da2da375SMatthew Dillon 	}
231da2da375SMatthew Dillon 
232da2da375SMatthew Dillon 	/*
233da2da375SMatthew Dillon 	 * Kill the slaves
234da2da375SMatthew Dillon 	 */
2357a61b85dSMatthew Dillon 	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
2367a61b85dSMatthew Dillon 		KKASSERT(info->runstate == 0);
2377a61b85dSMatthew Dillon 		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
2387a61b85dSMatthew Dillon 		info->runstate = -1;
2397a61b85dSMatthew Dillon 		wakeup(&info->runstate);
2407a61b85dSMatthew Dillon 		while (info->td)
241da2da375SMatthew Dillon 			tsleep(&info->td, 0, "hmrwwc", 0);
242bac808feSMatthew Dillon 		kfree(info, hmp->m_misc);
243059819e3SMatthew Dillon 	}
244f90dde4cSMatthew Dillon }
245059819e3SMatthew Dillon 
246af209b0fSMatthew Dillon /*
247af209b0fSMatthew Dillon  * The master flusher thread manages the flusher sequence id and
248af209b0fSMatthew Dillon  * synchronization with the slave work threads.
249af209b0fSMatthew Dillon  */
250059819e3SMatthew Dillon static void
251da2da375SMatthew Dillon hammer_flusher_master_thread(void *arg)
252059819e3SMatthew Dillon {
2537a61b85dSMatthew Dillon 	hammer_mount_t hmp;
254e86903d8SMatthew Dillon 	int seq;
255e86903d8SMatthew Dillon 	int nomore;
2560729c8c8SMatthew Dillon 
2577a61b85dSMatthew Dillon 	hmp = arg;
2587a61b85dSMatthew Dillon 
259b0aab9b9SMatthew Dillon 	lwkt_gettoken(&hmp->fs_token);
260b0aab9b9SMatthew Dillon 
2617a61b85dSMatthew Dillon 	for (;;) {
2627a61b85dSMatthew Dillon 		/*
263e86903d8SMatthew Dillon 		 * Flush all sequence numbers up to but not including .next,
264e86903d8SMatthew Dillon 		 * or until an open flush group is encountered.
2657a61b85dSMatthew Dillon 		 */
266059819e3SMatthew Dillon 		for (;;) {
267da2da375SMatthew Dillon 			while (hmp->flusher.group_lock)
268da2da375SMatthew Dillon 				tsleep(&hmp->flusher.group_lock, 0, "hmrhld",0);
26910a5d1baSMatthew Dillon 			hammer_flusher_clean_loose_ios(hmp);
270e86903d8SMatthew Dillon 
271e86903d8SMatthew Dillon 			seq = hammer_flusher_flush(hmp, &nomore);
272e86903d8SMatthew Dillon 			hmp->flusher.done = seq;
273da2da375SMatthew Dillon 			wakeup(&hmp->flusher.done);
274e86903d8SMatthew Dillon 
275cdb6e4e6SMatthew Dillon 			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
276cdb6e4e6SMatthew Dillon 				break;
277e86903d8SMatthew Dillon 			if (nomore)
278e86903d8SMatthew Dillon 				break;
2797a61b85dSMatthew Dillon 		}
280c32a6806SMatthew Dillon 
281c32a6806SMatthew Dillon 		/*
2821f07f686SMatthew Dillon 		 * Wait for activity.
283c32a6806SMatthew Dillon 		 */
2847a61b85dSMatthew Dillon 		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
285059819e3SMatthew Dillon 			break;
286da2da375SMatthew Dillon 		while (hmp->flusher.signal == 0)
287da2da375SMatthew Dillon 			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
288e86903d8SMatthew Dillon 		hmp->flusher.signal = 0;
2891f07f686SMatthew Dillon 	}
290da2da375SMatthew Dillon 
291da2da375SMatthew Dillon 	/*
292da2da375SMatthew Dillon 	 * And we are done.
293da2da375SMatthew Dillon 	 */
294da2da375SMatthew Dillon 	hmp->flusher.td = NULL;
295da2da375SMatthew Dillon 	wakeup(&hmp->flusher.exiting);
296b0aab9b9SMatthew Dillon 	lwkt_reltoken(&hmp->fs_token);
297da2da375SMatthew Dillon 	lwkt_exit();
298da2da375SMatthew Dillon }
299da2da375SMatthew Dillon 
300af209b0fSMatthew Dillon /*
301e86903d8SMatthew Dillon  * Flush the next sequence number until an open flush group is encountered
302e86903d8SMatthew Dillon  * or we reach (next).  Not all sequence numbers will have flush groups
303e86903d8SMatthew Dillon  * associated with them.  These require that the UNDO/REDO FIFO still be
304e86903d8SMatthew Dillon  * flushed since it can take at least one additional run to synchronize
305e86903d8SMatthew Dillon  * the FIFO, and more to also synchronize the reserve structures.
3067a61b85dSMatthew Dillon  */
307e86903d8SMatthew Dillon static int
308e86903d8SMatthew Dillon hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
3097a61b85dSMatthew Dillon {
3107a61b85dSMatthew Dillon 	hammer_flusher_info_t info;
3117a61b85dSMatthew Dillon 	hammer_flush_group_t flg;
3127a61b85dSMatthew Dillon 	hammer_reserve_t resv;
31315e75dabSMatthew Dillon 	int count;
314e86903d8SMatthew Dillon 	int seq;
3157a61b85dSMatthew Dillon 
3167a61b85dSMatthew Dillon 	/*
317e86903d8SMatthew Dillon 	 * Just in case there's a flush race on mount.  Seq number
318e86903d8SMatthew Dillon 	 * does not change.
3197a61b85dSMatthew Dillon 	 */
32037646115SMatthew Dillon 	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
321e86903d8SMatthew Dillon 		*nomorep = 1;
322e86903d8SMatthew Dillon 		return (hmp->flusher.done);
32337646115SMatthew Dillon 	}
324e86903d8SMatthew Dillon 	*nomorep = 0;
32537646115SMatthew Dillon 
32637646115SMatthew Dillon 	/*
327e86903d8SMatthew Dillon 	 * Flush the next sequence number.  Sequence numbers can exist
328e86903d8SMatthew Dillon 	 * without an assigned flush group, indicating that just a FIFO flush
329e86903d8SMatthew Dillon 	 * should occur.
33037646115SMatthew Dillon 	 */
331e86903d8SMatthew Dillon 	seq = hmp->flusher.done + 1;
33237646115SMatthew Dillon 	flg = TAILQ_FIRST(&hmp->flush_group_list);
33337646115SMatthew Dillon 	if (flg == NULL) {
334e86903d8SMatthew Dillon 		if (seq == hmp->flusher.next) {
335e86903d8SMatthew Dillon 			*nomorep = 1;
336e86903d8SMatthew Dillon 			return (hmp->flusher.done);
337e86903d8SMatthew Dillon 		}
338e86903d8SMatthew Dillon 	} else if (seq == flg->seq) {
339e86903d8SMatthew Dillon 		if (flg->closed) {
34037646115SMatthew Dillon 			KKASSERT(flg->running == 0);
34137646115SMatthew Dillon 			flg->running = 1;
342e86903d8SMatthew Dillon 			if (hmp->fill_flush_group == flg) {
343e86903d8SMatthew Dillon 				hmp->fill_flush_group =
344e86903d8SMatthew Dillon 					TAILQ_NEXT(flg, flush_entry);
345e86903d8SMatthew Dillon 			}
346e86903d8SMatthew Dillon 		} else {
347e86903d8SMatthew Dillon 			*nomorep = 1;
348e86903d8SMatthew Dillon 			return (hmp->flusher.done);
349e86903d8SMatthew Dillon 		}
350e86903d8SMatthew Dillon 	} else {
351b961cdbbSMatthew Dillon 		/*
352b961cdbbSMatthew Dillon 		 * Sequence number problems can only happen if a critical
353b961cdbbSMatthew Dillon 		 * filesystem error occurred which forced the filesystem into
354b961cdbbSMatthew Dillon 		 * read-only mode.
355b961cdbbSMatthew Dillon 		 */
356460925a6STomohiro Kusumi 		KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
357e86903d8SMatthew Dillon 		flg = NULL;
35837646115SMatthew Dillon 	}
3597a61b85dSMatthew Dillon 
3607a61b85dSMatthew Dillon 	/*
3617a61b85dSMatthew Dillon 	 * We only do one flg but we may have to loop/retry.
36237646115SMatthew Dillon 	 *
36337646115SMatthew Dillon 	 * Due to various races it is possible to come across a flush
36437646115SMatthew Dillon 	 * group which has not yet been closed.
3657a61b85dSMatthew Dillon 	 */
36615e75dabSMatthew Dillon 	count = 0;
36737646115SMatthew Dillon 	while (flg && flg->running) {
36815e75dabSMatthew Dillon 		++count;
3697a61b85dSMatthew Dillon 		if (hammer_debug_general & 0x0001) {
37033234d14STomohiro Kusumi 			hdkprintf("%d ttl=%d recs=%d\n",
371e86903d8SMatthew Dillon 				flg->seq, flg->total_count, flg->refs);
3727a61b85dSMatthew Dillon 		}
373cdb6e4e6SMatthew Dillon 		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
374cdb6e4e6SMatthew Dillon 			break;
3757a61b85dSMatthew Dillon 		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
3767a61b85dSMatthew Dillon 
3777a61b85dSMatthew Dillon 		/*
3787a61b85dSMatthew Dillon 		 * If the previous flush cycle just about exhausted our
3797a61b85dSMatthew Dillon 		 * UNDO space we may have to do a dummy cycle to move the
3807a61b85dSMatthew Dillon 		 * first_offset up before actually digging into a new cycle,
3817a61b85dSMatthew Dillon 		 * or the new cycle will not have sufficient undo space.
3827a61b85dSMatthew Dillon 		 */
3837a61b85dSMatthew Dillon 		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
3847a61b85dSMatthew Dillon 			hammer_flusher_finalize(&hmp->flusher.trans, 0);
3857a61b85dSMatthew Dillon 
38637646115SMatthew Dillon 		KKASSERT(hmp->next_flush_group != flg);
3877b6ccb11SMatthew Dillon 
3887b6ccb11SMatthew Dillon 		/*
389e2a02b72SMatthew Dillon 		 * Place the flg in the flusher structure and start the
390e2a02b72SMatthew Dillon 		 * slaves running.  The slaves will compete for inodes
391e2a02b72SMatthew Dillon 		 * to flush.
392e2a02b72SMatthew Dillon 		 *
393e2a02b72SMatthew Dillon 		 * Make a per-thread copy of the transaction.
3947a61b85dSMatthew Dillon 		 */
395e2a02b72SMatthew Dillon 		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
3967a61b85dSMatthew Dillon 			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
3977a61b85dSMatthew Dillon 			info->flg = flg;
3987a61b85dSMatthew Dillon 			info->runstate = 1;
399e2a02b72SMatthew Dillon 			info->trans = hmp->flusher.trans;
4007a61b85dSMatthew Dillon 			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
4017a61b85dSMatthew Dillon 			wakeup(&info->runstate);
4027a61b85dSMatthew Dillon 		}
4037a61b85dSMatthew Dillon 
4047a61b85dSMatthew Dillon 		/*
4057a61b85dSMatthew Dillon 		 * Wait for all slaves to finish running
4067a61b85dSMatthew Dillon 		 */
4077a61b85dSMatthew Dillon 		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
4087a61b85dSMatthew Dillon 			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
4097a61b85dSMatthew Dillon 
4107a61b85dSMatthew Dillon 		/*
4117a61b85dSMatthew Dillon 		 * Do the final finalization, clean up
4127a61b85dSMatthew Dillon 		 */
4137a61b85dSMatthew Dillon 		hammer_flusher_finalize(&hmp->flusher.trans, 1);
4147a61b85dSMatthew Dillon 		hmp->flusher.tid = hmp->flusher.trans.tid;
4157a61b85dSMatthew Dillon 
4167a61b85dSMatthew Dillon 		hammer_done_transaction(&hmp->flusher.trans);
4177a61b85dSMatthew Dillon 
4187a61b85dSMatthew Dillon 		/*
4197a61b85dSMatthew Dillon 		 * Loop up on the same flg.  If the flg is done clean it up
4207a61b85dSMatthew Dillon 		 * and break out.  We only flush one flg.
4217a61b85dSMatthew Dillon 		 */
422ff003b11SMatthew Dillon 		if (RB_EMPTY(&flg->flush_tree)) {
4237a61b85dSMatthew Dillon 			KKASSERT(flg->refs == 0);
4247a61b85dSMatthew Dillon 			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
425bac808feSMatthew Dillon 			kfree(flg, hmp->m_misc);
4267a61b85dSMatthew Dillon 			break;
4277a61b85dSMatthew Dillon 		}
42837646115SMatthew Dillon 		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
4297a61b85dSMatthew Dillon 	}
4307a61b85dSMatthew Dillon 
4317a61b85dSMatthew Dillon 	/*
4321b0ab2c3SMatthew Dillon 	 * We may have pure meta-data to flush, or we may have to finish
4331b0ab2c3SMatthew Dillon 	 * cycling the UNDO FIFO, even if there were no flush groups.
43415e75dabSMatthew Dillon 	 */
4351b0ab2c3SMatthew Dillon 	if (count == 0 && hammer_flusher_haswork(hmp)) {
43615e75dabSMatthew Dillon 		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
43715e75dabSMatthew Dillon 		hammer_flusher_finalize(&hmp->flusher.trans, 1);
43815e75dabSMatthew Dillon 		hammer_done_transaction(&hmp->flusher.trans);
43915e75dabSMatthew Dillon 	}
44015e75dabSMatthew Dillon 
44115e75dabSMatthew Dillon 	/*
4427a61b85dSMatthew Dillon 	 * Clean up any freed big-blocks (typically zone-2).
4437a61b85dSMatthew Dillon 	 * resv->flg_no is typically set several flush groups ahead
4447a61b85dSMatthew Dillon 	 * of the free to ensure that the freed block is not reused until
4457a61b85dSMatthew Dillon 	 * it can no longer be referenced.
4467a61b85dSMatthew Dillon 	 */
4477a61b85dSMatthew Dillon 	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
448f8a7a900STomohiro Kusumi 		if (resv->flg_no - seq > 0)
4497a61b85dSMatthew Dillon 			break;
4507a61b85dSMatthew Dillon 		hammer_reserve_clrdelay(hmp, resv);
4517a61b85dSMatthew Dillon 	}
452e86903d8SMatthew Dillon 	return (seq);
4537a61b85dSMatthew Dillon }
4547a61b85dSMatthew Dillon 
4557a61b85dSMatthew Dillon 
4567a61b85dSMatthew Dillon /*
457ff003b11SMatthew Dillon  * The slave flusher thread pulls work off the master flush list until no
458af209b0fSMatthew Dillon  * work is left.
459af209b0fSMatthew Dillon  */
460da2da375SMatthew Dillon static void
461da2da375SMatthew Dillon hammer_flusher_slave_thread(void *arg)
462da2da375SMatthew Dillon {
4637a61b85dSMatthew Dillon 	hammer_flush_group_t flg;
464da2da375SMatthew Dillon 	hammer_flusher_info_t info;
465da2da375SMatthew Dillon 	hammer_mount_t hmp;
466da2da375SMatthew Dillon 
467da2da375SMatthew Dillon 	info = arg;
468da2da375SMatthew Dillon 	hmp = info->hmp;
469b0aab9b9SMatthew Dillon 	lwkt_gettoken(&hmp->fs_token);
470da2da375SMatthew Dillon 
471da2da375SMatthew Dillon 	for (;;) {
4727a61b85dSMatthew Dillon 		while (info->runstate == 0)
4737a61b85dSMatthew Dillon 			tsleep(&info->runstate, 0, "hmrssw", 0);
4747a61b85dSMatthew Dillon 		if (info->runstate < 0)
475da2da375SMatthew Dillon 			break;
4767a61b85dSMatthew Dillon 		flg = info->flg;
477cb51be26SMatthew Dillon 
478e2a02b72SMatthew Dillon 		RB_SCAN(hammer_fls_rb_tree, &flg->flush_tree, NULL,
479e2a02b72SMatthew Dillon 			hammer_flusher_flush_inode, info);
480e2a02b72SMatthew Dillon 
4817a61b85dSMatthew Dillon 		info->runstate = 0;
482e2a02b72SMatthew Dillon 		info->flg = NULL;
4837a61b85dSMatthew Dillon 		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
4847a61b85dSMatthew Dillon 		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
4857a61b85dSMatthew Dillon 		wakeup(&hmp->flusher.ready_list);
486da2da375SMatthew Dillon 	}
487da2da375SMatthew Dillon 	info->td = NULL;
488da2da375SMatthew Dillon 	wakeup(&info->td);
489b0aab9b9SMatthew Dillon 	lwkt_reltoken(&hmp->fs_token);
490059819e3SMatthew Dillon 	lwkt_exit();
491059819e3SMatthew Dillon }
492059819e3SMatthew Dillon 
493525aad3aSMatthew Dillon void
49410a5d1baSMatthew Dillon hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
49510a5d1baSMatthew Dillon {
49610a5d1baSMatthew Dillon 	hammer_buffer_t buffer;
49710a5d1baSMatthew Dillon 	hammer_io_t io;
49810a5d1baSMatthew Dillon 
49910a5d1baSMatthew Dillon 	/*
50010a5d1baSMatthew Dillon 	 * loose ends - buffers without bp's aren't tracked by the kernel
50110a5d1baSMatthew Dillon 	 * and can build up, so clean them out.  This can occur when an
50210a5d1baSMatthew Dillon 	 * IO completes on a buffer with no references left.
503b0aab9b9SMatthew Dillon 	 *
504b0aab9b9SMatthew Dillon 	 * The io_token is needed to protect the list.
50510a5d1baSMatthew Dillon 	 */
5061afb73cfSMatthew Dillon 	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
507b0aab9b9SMatthew Dillon 		lwkt_gettoken(&hmp->io_token);
5081afb73cfSMatthew Dillon 		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
5091afb73cfSMatthew Dillon 			KKASSERT(io->mod_root == &hmp->lose_root);
5101afb73cfSMatthew Dillon 			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
5111afb73cfSMatthew Dillon 			io->mod_root = NULL;
51210a5d1baSMatthew Dillon 			hammer_ref(&io->lock);
51310a5d1baSMatthew Dillon 			buffer = (void *)io;
51410a5d1baSMatthew Dillon 			hammer_rel_buffer(buffer, 0);
51510a5d1baSMatthew Dillon 		}
516b0aab9b9SMatthew Dillon 		lwkt_reltoken(&hmp->io_token);
51710a5d1baSMatthew Dillon 	}
518525aad3aSMatthew Dillon }
51910a5d1baSMatthew Dillon 
520059819e3SMatthew Dillon /*
5219f5097dcSMatthew Dillon  * Flush a single inode that is part of a flush group.
52206ad81ffSMatthew Dillon  *
523cdb6e4e6SMatthew Dillon  * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
524cdb6e4e6SMatthew Dillon  * the front-end should have reserved sufficient space on the media.  Any
525cdb6e4e6SMatthew Dillon  * error other than EWOULDBLOCK will force the mount to be read-only.
526059819e3SMatthew Dillon  */
5279f5097dcSMatthew Dillon static
528e2a02b72SMatthew Dillon int
529e2a02b72SMatthew Dillon hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
5309f5097dcSMatthew Dillon {
531e2a02b72SMatthew Dillon 	hammer_flusher_info_t info = data;
532e2a02b72SMatthew Dillon 	hammer_mount_t hmp = info->hmp;
533e2a02b72SMatthew Dillon 	hammer_transaction_t trans = &info->trans;
53406ad81ffSMatthew Dillon 	int error;
5359f5097dcSMatthew Dillon 
536e2a02b72SMatthew Dillon 	/*
537e2a02b72SMatthew Dillon 	 * Several slaves are operating on the same flush group concurrently.
538e2a02b72SMatthew Dillon 	 * The SLAVEFLUSH flag prevents them from tripping over each other.
539e2a02b72SMatthew Dillon 	 *
540e2a02b72SMatthew Dillon 	 * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
541e2a02b72SMatthew Dillon 	 *	 to be resynced by another, but normally such inodes are not
542e2a02b72SMatthew Dillon 	 *	 revisited until the master loop gets to them.
543e2a02b72SMatthew Dillon 	 */
544e2a02b72SMatthew Dillon 	if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
545e2a02b72SMatthew Dillon 		return(0);
546e2a02b72SMatthew Dillon 	ip->flags |= HAMMER_INODE_SLAVEFLUSH;
547e2a02b72SMatthew Dillon 	++hammer_stats_inode_flushes;
548e2a02b72SMatthew Dillon 
549525aad3aSMatthew Dillon 	hammer_flusher_clean_loose_ios(hmp);
550a639987cSMatthew Dillon 	vm_wait_nominal();
55102325004SMatthew Dillon 	error = hammer_sync_inode(trans, ip);
552cdb6e4e6SMatthew Dillon 
553cdb6e4e6SMatthew Dillon 	/*
554cdb6e4e6SMatthew Dillon 	 * EWOULDBLOCK can happen under normal operation, all other errors
555cdb6e4e6SMatthew Dillon 	 * are considered extremely serious.  We must set WOULDBLOCK
556cdb6e4e6SMatthew Dillon 	 * mechanics to deal with the mess left over from the abort of the
557cdb6e4e6SMatthew Dillon 	 * previous flush.
558cdb6e4e6SMatthew Dillon 	 */
559cdb6e4e6SMatthew Dillon 	if (error) {
560cdb6e4e6SMatthew Dillon 		ip->flags |= HAMMER_INODE_WOULDBLOCK;
561cdb6e4e6SMatthew Dillon 		if (error == EWOULDBLOCK)
562cdb6e4e6SMatthew Dillon 			error = 0;
563cdb6e4e6SMatthew Dillon 	}
564*edcd6db2STomohiro Kusumi 	hammer_sync_inode_done(ip, error);
565e2a02b72SMatthew Dillon 	/* ip invalid */
566e2a02b72SMatthew Dillon 
567da2da375SMatthew Dillon 	while (hmp->flusher.finalize_want)
568da2da375SMatthew Dillon 		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
56906ad81ffSMatthew Dillon 	if (hammer_flusher_undo_exhausted(trans, 1)) {
570d053aa8aSTomohiro Kusumi 		hkprintf("Warning: UNDO area too small!\n");
5719f5097dcSMatthew Dillon 		hammer_flusher_finalize(trans, 1);
57206ad81ffSMatthew Dillon 	} else if (hammer_flusher_meta_limit(trans->hmp)) {
5739f5097dcSMatthew Dillon 		hammer_flusher_finalize(trans, 0);
574059819e3SMatthew Dillon 	}
575e2a02b72SMatthew Dillon 	return (0);
576059819e3SMatthew Dillon }
577059819e3SMatthew Dillon 
57810a5d1baSMatthew Dillon /*
57906ad81ffSMatthew Dillon  * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
58006ad81ffSMatthew Dillon  * space left.
58106ad81ffSMatthew Dillon  *
58206ad81ffSMatthew Dillon  * 1/4 - Emergency free undo space level.  Below this point the flusher
58306ad81ffSMatthew Dillon  *	 will finalize even if directory dependencies have not been resolved.
58406ad81ffSMatthew Dillon  *
58506ad81ffSMatthew Dillon  * 2/4 - Used by the pruning and reblocking code.  These functions may be
58606ad81ffSMatthew Dillon  *	 running in parallel with a flush and cannot be allowed to drop
58706ad81ffSMatthew Dillon  *	 available undo space to emergency levels.
58806ad81ffSMatthew Dillon  *
58906ad81ffSMatthew Dillon  * 3/4 - Used at the beginning of a flush to force-sync the volume header
59006ad81ffSMatthew Dillon  *	 to give the flush plenty of runway to work in.
591ec4e8497SMatthew Dillon  */
592ec4e8497SMatthew Dillon int
59306ad81ffSMatthew Dillon hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
594ec4e8497SMatthew Dillon {
59506ad81ffSMatthew Dillon 	if (hammer_undo_space(trans) <
59606ad81ffSMatthew Dillon 	    hammer_undo_max(trans->hmp) * quarter / 4) {
5971f07f686SMatthew Dillon 		return(1);
5981f07f686SMatthew Dillon 	} else {
5991f07f686SMatthew Dillon 		return(0);
6001f07f686SMatthew Dillon 	}
601ec4e8497SMatthew Dillon }
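
/*
 * Worked example with illustrative numbers only: if hammer_undo_max()
 * returns 1GB, quarter 1 reports exhaustion below 256MB of free undo
 * space, quarter 2 below 512MB, and quarter 3 below 768MB, matching the
 * 1/4, 2/4 and 3/4 levels described in the comment above.
 */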
602ec4e8497SMatthew Dillon 
603ec4e8497SMatthew Dillon /*
6049f5097dcSMatthew Dillon  * Flush all pending UNDOs, wait for write completion, update the volume
6059f5097dcSMatthew Dillon  * header with the new UNDO end position, and flush it.  Then
6069f5097dcSMatthew Dillon  * asynchronously flush the meta-data.
60710a5d1baSMatthew Dillon  *
6089f5097dcSMatthew Dillon  * If this is the last finalization in a flush group we also synchronize
6099f5097dcSMatthew Dillon  * our cached blockmap and set hmp->flusher_undo_start and our cached undo
6109f5097dcSMatthew Dillon  * fifo first_offset so the next flush resets the FIFO pointers.
6116c1f89f4SMatthew Dillon  *
6126c1f89f4SMatthew Dillon  * If this is not final it is being called because too many dirty meta-data
6136c1f89f4SMatthew Dillon  * buffers have built up and must be flushed with UNDO synchronization to
6146c1f89f4SMatthew Dillon  * avoid a buffer cache deadlock.
61510a5d1baSMatthew Dillon  */
61610a5d1baSMatthew Dillon void
6179f5097dcSMatthew Dillon hammer_flusher_finalize(hammer_transaction_t trans, int final)
618059819e3SMatthew Dillon {
6199f5097dcSMatthew Dillon 	hammer_volume_t root_volume;
6209f5097dcSMatthew Dillon 	hammer_blockmap_t cundomap, dundomap;
6219f5097dcSMatthew Dillon 	hammer_mount_t hmp;
62210a5d1baSMatthew Dillon 	hammer_io_t io;
62391ed3855SMatthew Dillon 	hammer_off_t save_undo_next_offset;
624c9b9e29dSMatthew Dillon 	int count;
62519619882SMatthew Dillon 	int i;
62610a5d1baSMatthew Dillon 
6279f5097dcSMatthew Dillon 	hmp = trans->hmp;
6289f5097dcSMatthew Dillon 	root_volume = trans->rootvol;
6299f5097dcSMatthew Dillon 
63047637bffSMatthew Dillon 	/*
6316c1f89f4SMatthew Dillon 	 * Exclusively lock the flusher.  This guarantees that all dirty
6326c1f89f4SMatthew Dillon 	 * buffers will be idled (have a mod-count of 0).
6336c1f89f4SMatthew Dillon 	 */
6346c1f89f4SMatthew Dillon 	++hmp->flusher.finalize_want;
6356c1f89f4SMatthew Dillon 	hammer_lock_ex(&hmp->flusher.finalize_lock);
6366c1f89f4SMatthew Dillon 
6376c1f89f4SMatthew Dillon 	/*
6386c1f89f4SMatthew Dillon 	 * If this isn't the final sync several threads may have hit the
6396c1f89f4SMatthew Dillon 	 * meta-limit at the same time and raced.  Only sync if we really
6406c1f89f4SMatthew Dillon 	 * have to, after acquiring the lock.
6416c1f89f4SMatthew Dillon 	 */
6426c1f89f4SMatthew Dillon 	if (final == 0 && !hammer_flusher_meta_limit(hmp))
6436c1f89f4SMatthew Dillon 		goto done;
6446c1f89f4SMatthew Dillon 
645cdb6e4e6SMatthew Dillon 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
646cdb6e4e6SMatthew Dillon 		goto done;
647cdb6e4e6SMatthew Dillon 
6486c1f89f4SMatthew Dillon 	/*
64947637bffSMatthew Dillon 	 * Flush data buffers.  This can occur asynchronously and at any
6509f5097dcSMatthew Dillon 	 * time.  We must interlock against the frontend direct-data write
6519f5097dcSMatthew Dillon 	 * but do not have to acquire the sync-lock yet.
6529192654cSMatthew Dillon 	 *
6539192654cSMatthew Dillon 	 * These data buffers have already been collected prior to the
6549192654cSMatthew Dillon 	 * related inode(s) getting queued to the flush group.
65547637bffSMatthew Dillon 	 */
65647637bffSMatthew Dillon 	count = 0;
6571afb73cfSMatthew Dillon 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
658cdb6e4e6SMatthew Dillon 		if (io->ioerror)
659cdb6e4e6SMatthew Dillon 			break;
66047637bffSMatthew Dillon 		hammer_ref(&io->lock);
6619f5097dcSMatthew Dillon 		hammer_io_write_interlock(io);
6627fb33ff0STomohiro Kusumi 		KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
663710733a6SMatthew Dillon 		hammer_io_flush(io, 0);
6649f5097dcSMatthew Dillon 		hammer_io_done_interlock(io);
665195f6076STomohiro Kusumi 		hammer_rel_buffer(HAMMER_ITOB(io), 0);
666ba298df1SMatthew Dillon 		hammer_io_limit_backlog(hmp);
667daaabaa0SMatthew Dillon 		++count;
66847637bffSMatthew Dillon 	}
66947637bffSMatthew Dillon 
6709f5097dcSMatthew Dillon 	/*
6719f5097dcSMatthew Dillon 	 * The sync-lock is required for the remaining sequence.  This lock
6729f5097dcSMatthew Dillon 	 * prevents meta-data from being modified.
6739f5097dcSMatthew Dillon 	 */
6742f85fa4dSMatthew Dillon 	hammer_sync_lock_ex(trans);
6759480ff55SMatthew Dillon 
676059819e3SMatthew Dillon 	/*
6779f5097dcSMatthew Dillon 	 * If we have been asked to finalize the volume header, sync the
6789f5097dcSMatthew Dillon 	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
6799f5097dcSMatthew Dillon 	 * record for the update.
680e8599db1SMatthew Dillon 	 */
6819f5097dcSMatthew Dillon 	if (final) {
6829f5097dcSMatthew Dillon 		cundomap = &hmp->blockmap[0];
6839f5097dcSMatthew Dillon 		dundomap = &root_volume->ondisk->vol0_blockmap[0];
6849f5097dcSMatthew Dillon 		if (root_volume->io.modified) {
685e8599db1SMatthew Dillon 			hammer_modify_volume(trans, root_volume,
6869f5097dcSMatthew Dillon 					     dundomap, sizeof(hmp->blockmap));
6874c09d9c4SMatthew Dillon 			for (i = 0; i < HAMMER_MAX_ZONES; ++i) {
6884c09d9c4SMatthew Dillon 				hammer_crc_set_blockmap(hmp->version,
6894c09d9c4SMatthew Dillon 							&cundomap[i]);
6904c09d9c4SMatthew Dillon 			}
6919f5097dcSMatthew Dillon 			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
692e8599db1SMatthew Dillon 			hammer_modify_volume_done(root_volume);
693e8599db1SMatthew Dillon 		}
6949f5097dcSMatthew Dillon 	}
695e8599db1SMatthew Dillon 
696e8599db1SMatthew Dillon 	/*
697eddadaeeSMatthew Dillon 	 * Flush UNDOs.  This can occur concurrently with the data flush
698eddadaeeSMatthew Dillon 	 * because data writes never overwrite.
699eddadaeeSMatthew Dillon 	 *
700eddadaeeSMatthew Dillon 	 * This also waits for I/Os to complete and flushes the cache on
701eddadaeeSMatthew Dillon 	 * the target disk.
70291ed3855SMatthew Dillon 	 *
70391ed3855SMatthew Dillon 	 * Record the UNDO append point as this can continue to change
70491ed3855SMatthew Dillon 	 * after we have flushed the UNDOs.
705059819e3SMatthew Dillon 	 */
70691ed3855SMatthew Dillon 	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
70791ed3855SMatthew Dillon 	hammer_lock_ex(&hmp->undo_lock);
70891ed3855SMatthew Dillon 	save_undo_next_offset = cundomap->next_offset;
70991ed3855SMatthew Dillon 	hammer_unlock(&hmp->undo_lock);
7109192654cSMatthew Dillon 	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);
711059819e3SMatthew Dillon 
712cdb6e4e6SMatthew Dillon 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
713cdb6e4e6SMatthew Dillon 		goto failed;
714cdb6e4e6SMatthew Dillon 
715059819e3SMatthew Dillon 	/*
71602428fb6SMatthew Dillon 	 * HAMMER VERSION < 4:
71702428fb6SMatthew Dillon 	 *	Update the on-disk volume header with new UNDO FIFO end
71802428fb6SMatthew Dillon 	 *	position (do not generate new UNDO records for this change).
71902428fb6SMatthew Dillon 	 *	We have to do this for the UNDO FIFO whether (final) is
72002428fb6SMatthew Dillon 	 *	set or not in order for the UNDOs to be recognized on
72102428fb6SMatthew Dillon 	 *	recovery.
72202428fb6SMatthew Dillon 	 *
72302428fb6SMatthew Dillon 	 * HAMMER VERSION >= 4:
72402428fb6SMatthew Dillon 	 *	The UNDO FIFO data written above will be recognized on
72502428fb6SMatthew Dillon 	 *	recovery without us having to sync the volume header.
726c9b9e29dSMatthew Dillon 	 *
7279f5097dcSMatthew Dillon 	 * Also update the on-disk next_tid field.  This does not require
7289f5097dcSMatthew Dillon 	 * an UNDO.  However, because our TID is generated before we get
7299f5097dcSMatthew Dillon 	 * the sync lock another sync may have beat us to the punch.
7309f5097dcSMatthew Dillon 	 *
73106ad81ffSMatthew Dillon 	 * This also has the side effect of updating first_offset based on
73206ad81ffSMatthew Dillon 	 * a prior finalization when the first finalization of the next flush
73306ad81ffSMatthew Dillon 	 * cycle occurs, removing any undo info from the prior finalization
73406ad81ffSMatthew Dillon 	 * from consideration.
73506ad81ffSMatthew Dillon 	 *
7369f5097dcSMatthew Dillon 	 * The volume header will be flushed out synchronously.
737c9b9e29dSMatthew Dillon 	 */
7389f5097dcSMatthew Dillon 	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
7399f5097dcSMatthew Dillon 	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
7409f5097dcSMatthew Dillon 
7419f5097dcSMatthew Dillon 	if (dundomap->first_offset != cundomap->first_offset ||
74291ed3855SMatthew Dillon 		   dundomap->next_offset != save_undo_next_offset) {
743f1c0ae53STomohiro Kusumi 		hammer_modify_volume_noundo(NULL, root_volume);
7449f5097dcSMatthew Dillon 		dundomap->first_offset = cundomap->first_offset;
74591ed3855SMatthew Dillon 		dundomap->next_offset = save_undo_next_offset;
7464c09d9c4SMatthew Dillon 		hammer_crc_set_blockmap(hmp->version, dundomap);
7479f5097dcSMatthew Dillon 		hammer_modify_volume_done(root_volume);
7489f5097dcSMatthew Dillon 	}
7499f5097dcSMatthew Dillon 
7504889cbd4SMatthew Dillon 	/*
7514889cbd4SMatthew Dillon 	 * vol0_next_tid is used for TID selection and is updated without
7524889cbd4SMatthew Dillon 	 * an UNDO so we do not reuse a TID that may have been rolled-back.
7534889cbd4SMatthew Dillon 	 *
7544889cbd4SMatthew Dillon 	 * vol0_last_tid is the highest fully-synchronized TID.  It is
7554889cbd4SMatthew Dillon 	 * set-up when the UNDO fifo is fully synced, later on (not here).
75647f363f1SMatthew Dillon 	 *
75747f363f1SMatthew Dillon 	 * The root volume can be open for modification by other threads
75847f363f1SMatthew Dillon 	 * generating UNDO or REDO records.  For example, reblocking,
75947f363f1SMatthew Dillon 	 * pruning, REDO mode fast-fsyncs, so the write interlock is
76047f363f1SMatthew Dillon 	 * mandatory.
7614889cbd4SMatthew Dillon 	 */
7629f5097dcSMatthew Dillon 	if (root_volume->io.modified) {
763f1c0ae53STomohiro Kusumi 		hammer_modify_volume_noundo(NULL, root_volume);
764adf01747SMatthew Dillon 		if (root_volume->ondisk->vol0_next_tid < trans->tid)
765adf01747SMatthew Dillon 			root_volume->ondisk->vol0_next_tid = trans->tid;
7664c09d9c4SMatthew Dillon 		hammer_crc_set_volume(hmp->version, root_volume->ondisk);
767adf01747SMatthew Dillon 		hammer_modify_volume_done(root_volume);
76847f363f1SMatthew Dillon 		hammer_io_write_interlock(&root_volume->io);
769710733a6SMatthew Dillon 		hammer_io_flush(&root_volume->io, 0);
77047f363f1SMatthew Dillon 		hammer_io_done_interlock(&root_volume->io);
77119619882SMatthew Dillon 	}
772059819e3SMatthew Dillon 
773059819e3SMatthew Dillon 	/*
77402428fb6SMatthew Dillon 	 * Wait for I/Os to complete.
77502428fb6SMatthew Dillon 	 *
77602428fb6SMatthew Dillon 	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
77702428fb6SMatthew Dillon 	 * the I/O to complete as the new UNDO FIFO entries are recognized
77802428fb6SMatthew Dillon 	 * even without the volume header update.  This allows the volume
77902428fb6SMatthew Dillon 	 * header to flushed along with meta-data, significantly reducing
78002428fb6SMatthew Dillon 	 * header to be flushed along with meta-data, significantly reducing
781059819e3SMatthew Dillon 	 */
782a99b9ea2SMatthew Dillon 	hammer_flusher_clean_loose_ios(hmp);
78302428fb6SMatthew Dillon 	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
784eddadaeeSMatthew Dillon 		hammer_io_wait_all(hmp, "hmrfl3", 1);
785059819e3SMatthew Dillon 
786cdb6e4e6SMatthew Dillon 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
787cdb6e4e6SMatthew Dillon 		goto failed;
788cdb6e4e6SMatthew Dillon 
789059819e3SMatthew Dillon 	/*
790e8599db1SMatthew Dillon 	 * Flush meta-data.  The meta-data will be undone if we crash
79102428fb6SMatthew Dillon 	 * so we can safely flush it asynchronously.  There is no need
79202428fb6SMatthew Dillon 	 * to wait for I/O to complete (or issue a synchronous disk flush).
7939f5097dcSMatthew Dillon 	 *
79402428fb6SMatthew Dillon 	 * In fact, even if we did wait the meta-data will still be undone
79502428fb6SMatthew Dillon 	 * by a crash up until the next flush cycle due to the first_offset
79602428fb6SMatthew Dillon 	 * in the volume header for the UNDO FIFO not being adjusted until
79702428fb6SMatthew Dillon 	 * the following flush cycle.
79877912481SMatthew Dillon 	 *
79977912481SMatthew Dillon 	 * No io interlock is needed, bioops callbacks will not mess with
80077912481SMatthew Dillon 	 * meta data buffers.
801059819e3SMatthew Dillon 	 */
802c9b9e29dSMatthew Dillon 	count = 0;
8031afb73cfSMatthew Dillon 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
804cdb6e4e6SMatthew Dillon 		if (io->ioerror)
805cdb6e4e6SMatthew Dillon 			break;
80610a5d1baSMatthew Dillon 		KKASSERT(io->modify_refs == 0);
80710a5d1baSMatthew Dillon 		hammer_ref(&io->lock);
8087fb33ff0STomohiro Kusumi 		KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
809710733a6SMatthew Dillon 		hammer_io_flush(io, 0);
810195f6076STomohiro Kusumi 		hammer_rel_buffer(HAMMER_ITOB(io), 0);
811ba298df1SMatthew Dillon 		hammer_io_limit_backlog(hmp);
812daaabaa0SMatthew Dillon 		++count;
813059819e3SMatthew Dillon 	}
8149f5097dcSMatthew Dillon 
8159f5097dcSMatthew Dillon 	/*
8169f5097dcSMatthew Dillon 	 * If this is the final finalization for the flush group set
8179f5097dcSMatthew Dillon 	 * up for the next sequence by setting a new first_offset in
81806ad81ffSMatthew Dillon 	 * our cached blockmap and clearing the undo history.
81906ad81ffSMatthew Dillon 	 *
82006ad81ffSMatthew Dillon 	 * Even though we have updated our cached first_offset, the on-disk
82106ad81ffSMatthew Dillon 	 * first_offset still governs available-undo-space calculations.
82291ed3855SMatthew Dillon 	 *
82391ed3855SMatthew Dillon 	 * We synchronize to save_undo_next_offset rather than
82491ed3855SMatthew Dillon 	 * cundomap->next_offset because that is what we flushed out
82591ed3855SMatthew Dillon 	 * above.
82691ed3855SMatthew Dillon 	 *
82791ed3855SMatthew Dillon 	 * NOTE! UNDOs can only be added with the sync_lock held
82891ed3855SMatthew Dillon 	 *	 so we can clear the undo history without racing.
82991ed3855SMatthew Dillon 	 *	 REDOs can be added at any time which is why we
83091ed3855SMatthew Dillon 	 *	 have to be careful and use save_undo_next_offset
83191ed3855SMatthew Dillon 	 *	 when setting the new first_offset.
8329f5097dcSMatthew Dillon 	 */
8339f5097dcSMatthew Dillon 	if (final) {
8349f5097dcSMatthew Dillon 		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
83591ed3855SMatthew Dillon 		if (cundomap->first_offset != save_undo_next_offset) {
83691ed3855SMatthew Dillon 			cundomap->first_offset = save_undo_next_offset;
8371b0ab2c3SMatthew Dillon 			hmp->hflags |= HMNT_UNDO_DIRTY;
83891ed3855SMatthew Dillon 		} else if (cundomap->first_offset != cundomap->next_offset) {
83991ed3855SMatthew Dillon 			hmp->hflags |= HMNT_UNDO_DIRTY;
84091ed3855SMatthew Dillon 		} else {
84191ed3855SMatthew Dillon 			hmp->hflags &= ~HMNT_UNDO_DIRTY;
8421b0ab2c3SMatthew Dillon 		}
8439f5097dcSMatthew Dillon 		hammer_clear_undo_history(hmp);
8444889cbd4SMatthew Dillon 
8454889cbd4SMatthew Dillon 		/*
8464889cbd4SMatthew Dillon 		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
8474889cbd4SMatthew Dillon 		 * meaning a crash will not roll it back.  flush_tid2 has
8484889cbd4SMatthew Dillon 		 * been written out asynchronously and a crash will roll
8494889cbd4SMatthew Dillon 		 * it back.  flush_tid1 is used for all mirroring masters.
8504889cbd4SMatthew Dillon 		 */
8514889cbd4SMatthew Dillon 		if (hmp->flush_tid1 != hmp->flush_tid2) {
8524889cbd4SMatthew Dillon 			hmp->flush_tid1 = hmp->flush_tid2;
8534889cbd4SMatthew Dillon 			wakeup(&hmp->flush_tid1);
8544889cbd4SMatthew Dillon 		}
8554889cbd4SMatthew Dillon 		hmp->flush_tid2 = trans->tid;
85647f363f1SMatthew Dillon 
85747f363f1SMatthew Dillon 		/*
85847f363f1SMatthew Dillon 		 * Clear the REDO SYNC flag.  This flag is used to ensure
85947f363f1SMatthew Dillon 		 * that the recovery span in the UNDO/REDO FIFO contains
86047f363f1SMatthew Dillon 		 * at least one REDO SYNC record.
86147f363f1SMatthew Dillon 		 */
86247f363f1SMatthew Dillon 		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
8639f5097dcSMatthew Dillon 	}
8649f5097dcSMatthew Dillon 
865cdb6e4e6SMatthew Dillon 	/*
866cdb6e4e6SMatthew Dillon 	 * Cleanup.  Report any critical errors.
867cdb6e4e6SMatthew Dillon 	 */
868cdb6e4e6SMatthew Dillon failed:
8692f85fa4dSMatthew Dillon 	hammer_sync_unlock(trans);
8706c1f89f4SMatthew Dillon 
871cdb6e4e6SMatthew Dillon 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
872d053aa8aSTomohiro Kusumi 		hvkprintf(root_volume,
873d053aa8aSTomohiro Kusumi 			"Critical write error during flush, "
874d053aa8aSTomohiro Kusumi 			"refusing to sync UNDO FIFO\n");
875cdb6e4e6SMatthew Dillon 	}
876cdb6e4e6SMatthew Dillon 
8776c1f89f4SMatthew Dillon done:
8786c1f89f4SMatthew Dillon 	hammer_unlock(&hmp->flusher.finalize_lock);
8794889cbd4SMatthew Dillon 
8806c1f89f4SMatthew Dillon 	if (--hmp->flusher.finalize_want == 0)
8816c1f89f4SMatthew Dillon 		wakeup(&hmp->flusher.finalize_want);
882ce0138a6SMatthew Dillon 	hammer_stats_commits += final;
883059819e3SMatthew Dillon }
884059819e3SMatthew Dillon 
88506ad81ffSMatthew Dillon /*
8869192654cSMatthew Dillon  * Flush UNDOs.
8876048b411SMatthew Dillon  */
8886048b411SMatthew Dillon void
8899192654cSMatthew Dillon hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
8906048b411SMatthew Dillon {
8916048b411SMatthew Dillon 	hammer_io_t io;
8926048b411SMatthew Dillon 	int count;
8936048b411SMatthew Dillon 
8946048b411SMatthew Dillon 	count = 0;
8951afb73cfSMatthew Dillon 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
8966048b411SMatthew Dillon 		if (io->ioerror)
8976048b411SMatthew Dillon 			break;
8986048b411SMatthew Dillon 		hammer_ref(&io->lock);
8997fb33ff0STomohiro Kusumi 		KKASSERT(io->type != HAMMER_IOTYPE_VOLUME);
90091ed3855SMatthew Dillon 		hammer_io_write_interlock(io);
9016048b411SMatthew Dillon 		hammer_io_flush(io, hammer_undo_reclaim(io));
90291ed3855SMatthew Dillon 		hammer_io_done_interlock(io);
903195f6076STomohiro Kusumi 		hammer_rel_buffer(HAMMER_ITOB(io), 0);
904daaabaa0SMatthew Dillon 		hammer_io_limit_backlog(hmp);
9056048b411SMatthew Dillon 		++count;
9066048b411SMatthew Dillon 	}
9076048b411SMatthew Dillon 	hammer_flusher_clean_loose_ios(hmp);
9089192654cSMatthew Dillon 	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
9099192654cSMatthew Dillon 	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
910eddadaeeSMatthew Dillon 		hammer_io_wait_all(hmp, "hmrfl1", 1);
911eddadaeeSMatthew Dillon 	} else {
912eddadaeeSMatthew Dillon 		hammer_io_wait_all(hmp, "hmrfl2", 0);
9136048b411SMatthew Dillon 	}
9149192654cSMatthew Dillon }
9156048b411SMatthew Dillon 
9166048b411SMatthew Dillon /*
91706ad81ffSMatthew Dillon  * Return non-zero if too many dirty meta-data buffers have built up.
91806ad81ffSMatthew Dillon  *
91906ad81ffSMatthew Dillon  * Since we cannot allow such buffers to flush until we have dealt with
92006ad81ffSMatthew Dillon  * the UNDOs, we risk deadlocking the kernel's buffer cache.
92106ad81ffSMatthew Dillon  */
92206ad81ffSMatthew Dillon int
92306ad81ffSMatthew Dillon hammer_flusher_meta_limit(hammer_mount_t hmp)
92406ad81ffSMatthew Dillon {
925f5a07a7aSMatthew Dillon 	if (hmp->locked_dirty_space + hmp->io_running_space >
926f5a07a7aSMatthew Dillon 	    hammer_limit_dirtybufspace) {
92706ad81ffSMatthew Dillon 		return(1);
92806ad81ffSMatthew Dillon 	}
92906ad81ffSMatthew Dillon 	return(0);
93006ad81ffSMatthew Dillon }
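
/*
 * Illustrative numbers (hammer_limit_dirtybufspace is a tunable; the
 * value is assumed here): with the limit at 100MB this returns non-zero
 * once locked_dirty_space + io_running_space exceeds 100MB, prompting an
 * early non-final hammer_flusher_finalize() from the slave threads.
 * hammer_flusher_meta_halflimit() below applies the same test at half
 * the limit so background operations back off sooner.
 */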
93106ad81ffSMatthew Dillon 
9321b0ab2c3SMatthew Dillon /*
9331b0ab2c3SMatthew Dillon  * Return non-zero if too many dirty meta-data buffers have built up.
9341b0ab2c3SMatthew Dillon  *
9351b0ab2c3SMatthew Dillon  * This version is used by background operations (mirror, prune, reblock)
9361b0ab2c3SMatthew Dillon  * to leave room for foreground operations.
9371b0ab2c3SMatthew Dillon  */
93893291532SMatthew Dillon int
93993291532SMatthew Dillon hammer_flusher_meta_halflimit(hammer_mount_t hmp)
94093291532SMatthew Dillon {
94193291532SMatthew Dillon 	if (hmp->locked_dirty_space + hmp->io_running_space >
94293291532SMatthew Dillon 	    hammer_limit_dirtybufspace / 2) {
94393291532SMatthew Dillon 		return(1);
94493291532SMatthew Dillon 	}
94593291532SMatthew Dillon 	return(0);
94693291532SMatthew Dillon }
94793291532SMatthew Dillon 
9481b0ab2c3SMatthew Dillon /*
9491b0ab2c3SMatthew Dillon  * Return non-zero if the flusher still has something to flush.
9501b0ab2c3SMatthew Dillon  */
9511b0ab2c3SMatthew Dillon int
9521b0ab2c3SMatthew Dillon hammer_flusher_haswork(hammer_mount_t hmp)
9531b0ab2c3SMatthew Dillon {
954c58123daSMatthew Dillon 	if (hmp->ronly)
955c58123daSMatthew Dillon 		return(0);
956cdb6e4e6SMatthew Dillon 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
957cdb6e4e6SMatthew Dillon 		return(0);
9581b0ab2c3SMatthew Dillon 	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
9591afb73cfSMatthew Dillon 	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
9601afb73cfSMatthew Dillon 	    RB_ROOT(&hmp->undo_root) ||
9611afb73cfSMatthew Dillon 	    RB_ROOT(&hmp->data_root) ||
9621afb73cfSMatthew Dillon 	    RB_ROOT(&hmp->meta_root) ||
9639a620123STomohiro Kusumi 	    (hmp->hflags & HMNT_UNDO_DIRTY)) {		/* UNDO FIFO sync */
9641b0ab2c3SMatthew Dillon 		return(1);
9651b0ab2c3SMatthew Dillon 	}
9661b0ab2c3SMatthew Dillon 	return(0);
9671b0ab2c3SMatthew Dillon }
9681b0ab2c3SMatthew Dillon 
9698bae937eSTomohiro Kusumi int
9708bae937eSTomohiro Kusumi hammer_flush_dirty(hammer_mount_t hmp, int max_count)
9718bae937eSTomohiro Kusumi {
9728bae937eSTomohiro Kusumi 	int count = 0;
9738bae937eSTomohiro Kusumi 	int dummy;
9748bae937eSTomohiro Kusumi 
9758bae937eSTomohiro Kusumi 	while (hammer_flusher_haswork(hmp)) {
9768bae937eSTomohiro Kusumi 		hammer_flusher_sync(hmp);
9778bae937eSTomohiro Kusumi 		++count;
9788bae937eSTomohiro Kusumi 		if (count >= 5) {
9798bae937eSTomohiro Kusumi 			if (count == 5)
980d053aa8aSTomohiro Kusumi 				hkprintf("flushing.");
9818bae937eSTomohiro Kusumi 			else
9828bae937eSTomohiro Kusumi 				kprintf(".");
9838bae937eSTomohiro Kusumi 			tsleep(&dummy, 0, "hmrufl", hz);
9848bae937eSTomohiro Kusumi 		}
9858bae937eSTomohiro Kusumi 		if (max_count != -1 && count == max_count) {
9868bae937eSTomohiro Kusumi 			kprintf("giving up");
9878bae937eSTomohiro Kusumi 			break;
9888bae937eSTomohiro Kusumi 		}
9898bae937eSTomohiro Kusumi 	}
9908bae937eSTomohiro Kusumi 	if (count >= 5)
9918bae937eSTomohiro Kusumi 		kprintf("\n");
9928bae937eSTomohiro Kusumi 
9938bae937eSTomohiro Kusumi 	if (count >= max_count)
9948bae937eSTomohiro Kusumi 		return(-1);
9958bae937eSTomohiro Kusumi 	return(0);
9968bae937eSTomohiro Kusumi }
997