/*	$OpenBSD: vfs_sync.c,v 1.32 2005/05/31 11:35:33 art Exp $	*/

/*
 *  Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Syncer daemon
 */

#include <sys/queue.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <sys/kernel.h>
#include <sys/sched.h>

#ifdef FFS_SOFTUPDATES
int   softdep_process_worklist(struct mount *);
#endif

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY	32		/* maximum sync delay time */
#define SYNCER_DEFAULT 30		/* default sync delay time */
int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = SYNCER_DEFAULT;	/* time to delay syncing vnodes */

int rushjob = 0;			/* number of slots to run ASAP */
int stat_rush_requests = 0;		/* number of rush requests */

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

struct proc *syncerproc;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, filesystem metadata
 * (pushed through the block device vnodes) is delayed only about half
 * the time that file data is delayed. Similarly, directory updates are
 * more critical, so they are delayed only about a third of the time
 * that file data is delayed. Thus, there are SYNCER_MAXDELAY queues
 * that are processed round-robin at a rate of one each second (driven
 * off the filesystem syncer process). The syncer_delayno variable
 * indicates the next queue that is to be processed. Items that need
 * to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is achieved by placing the request
 * fifteen entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
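/*
 * Worked example of the slot arithmetic above (a sketch; the numbers
 * assume the default SYNCER_MAXDELAY of 32, so syncer_mask is 31):
 * if syncer_delayno is currently 20, a vnode queued with a delay of
 * 15 seconds lands in slot (20 + 15) & 31 == 3, which the syncer
 * reaches 15 one-second ticks from now.  hashinit() below sizes the
 * table to a power of two, which is what makes "& syncer_mask" a
 * cheap modulus.
 */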

void
vn_initialize_syncerd()
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, M_WAITOK,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(vp, delay)
	struct vnode *vp;
	int delay;
{
	int s, slot;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	s = splbio();
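	/*
	 * If the vnode is already on some slot of the wheel, take it off
	 * first so that it only ever appears on one synclist at a time.
	 */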
	if (vp->v_bioflag & VBIOONSYNCLIST)
		LIST_REMOVE(vp, v_synclist);

	vp->v_bioflag |= VBIOONSYNCLIST;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	splx(s);
}

/*
 * System filesystem synchronizer daemon.
 */

void
sched_sync(p)
	struct proc *p;
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;

	syncerproc = curproc;

	for (;;) {
		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];

		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;

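		/*
		 * Drain the slot we just stepped past.  Anything re-queued
		 * from inside this loop lands on a different slot: the wheel
		 * index has already been advanced and the delay is clamped to
		 * syncer_maxdelay - 2 in vn_syncer_add_to_worklist(), so a
		 * vnode can never come back onto slp and the loop terminates.
		 */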
		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, p) != 0) {
				/*
				 * If we fail to get the lock, we move this
				 * vnode one second ahead in time.
				 * XXX - no good, but the best we can do.
				 */
				vn_syncer_add_to_worklist(vp, 1);
				continue;
			}
			splx(s);
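			/*
			 * MNT_LAZY asks the filesystem for a lazy sync of
			 * this vnode: start writes for its dirty buffers
			 * without waiting for them to complete.
			 */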
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			VOP_UNLOCK(vp, 0, p);
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: disk vps can remain on the worklist
				 * with no dirty blocks; they are exempted by
				 * the VBLK check below.  The syncer vnode is
				 * safe too, since sync_fsync() moves it to a
				 * different slot.
				 */
				if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
				    vp->v_type != VBLK) {
					vprint("fsync failed", vp);
					if (vp->v_mount != NULL)
						printf("mounted on: %s\n",
						    vp->v_mount->mnt_stat.f_mntonname);
					panic("sched_sync: fsync failed");
				}
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
		}

		splx(s);

#ifdef FFS_SOFTUPDATES
		/*
		 * Do soft update processing.
		 */
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time; otherwise it could take over the CPU.
 */
int
speedup_syncer()
{
	int s;

	SCHED_LOCK(s);
	if (syncerproc && syncerproc->p_wchan == &lbolt)
		setrunnable(syncerproc);
	SCHED_UNLOCK(s);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return 1;
	}
	return 0;
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close nullop
int   sync_fsync(void *);
int   sync_inactive(void *);
#define sync_reclaim nullop
#define sync_lock vop_generic_lock
#define sync_unlock vop_generic_unlock
int   sync_print(void *);
#define sync_islocked vop_generic_islocked

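/*
 * Operations not listed in the table below fall through to the
 * vop_default_desc entry, i.e. to vn_default_error.
 */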
int (**sync_vnodeop_p)(void *);
struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_close_desc, sync_close },		/* close */
	{ &vop_fsync_desc, sync_fsync },		/* fsync */
	{ &vop_inactive_desc, sync_inactive },		/* inactive */
	{ &vop_reclaim_desc, sync_reclaim },		/* reclaim */
	{ &vop_lock_desc, sync_lock },			/* lock */
	{ &vop_unlock_desc, sync_unlock },		/* unlock */
	{ &vop_print_desc, sync_print },		/* print */
	{ &vop_islocked_desc, sync_islocked },		/* islocked */
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};
struct vnodeopv_desc sync_vnodeop_opv_desc = {
	&sync_vnodeop_p, sync_vnodeop_entries
};

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_writecount = 1;
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
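	/*
	 * With the default 32-slot wheel, successive calls hand out
	 * delays of 16, 8, 24, 4, 12, 20, 28, 2, 6, ... seconds; the
	 * start/incr/next sequence below keeps binary-subdividing the
	 * wheel so the per-mount sync vnodes stay spread out.
	 */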
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, next);
	mp->mnt_syncer = vp;
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 */
int
sync_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	simple_lock(&mountlist_slock);
	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p) == 0) {
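		/*
		 * MNT_ASYNC is cleared for the duration of the VFS_SYNC
		 * call and restored below; with async semantics left in
		 * place the pass would tend to just re-delay the writes
		 * instead of getting them onto the disk.
		 */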
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
		vfs_unbusy(mp, ap->a_p);
	} else
		simple_unlock(&mountlist_slock);

	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
int
sync_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	int s;

	if (vp->v_usecount == 0) {
		VOP_UNLOCK(vp, 0, ap->a_p);
		return (0);
	}

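	/*
	 * We are being decommissioned (typically at unmount): detach
	 * from the mount, leave the syncer worklist, drop the artificial
	 * v_writecount set in vfs_allocate_syncvnode() and release the
	 * vnode.
	 */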
	vp->v_mount->mnt_syncer = NULL;

	s = splbio();

	LIST_REMOVE(vp, v_synclist);
	vp->v_bioflag &= ~VBIOONSYNCLIST;

	splx(s);

	vp->v_writecount = 0;
	vput(vp);

	return (0);
}

/*
 * Print out a syncer vnode.
 */
int
sync_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}