/*	$OpenBSD: vfs_sync.c,v 1.31 2005/05/29 03:20:42 deraadt Exp $	*/

/*
 *  Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Syncer daemon
 */

#include <sys/queue.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <sys/kernel.h>

#ifdef FFS_SOFTUPDATES
int	softdep_process_worklist(struct mount *);
#endif

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY	32		/* maximum sync delay time */
#define SYNCER_DEFAULT	30		/* default sync delay time */
int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = SYNCER_DEFAULT;	/* time to delay syncing vnodes */

int rushjob = 0;			/* number of slots to run ASAP */
int stat_rush_requests = 0;		/* number of rush requests */

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

struct proc *syncerproc;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, vnodes for mounted
 * block devices are delayed only about half the time that file data
 * is delayed. Similarly, directory updates are more critical, so they
 * are only delayed about a third of the time that file data is
 * delayed. Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process). The syncer_delayno variable indicates the next
 * queue that is to be processed. Items that need to be processed soon
 * are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is achieved by placing the request
 * fifteen entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
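/*
 * For example, with the default SYNCER_MAXDELAY of 32 the table holds
 * 32 slots and syncer_mask is 31, so the slot arithmetic above is a
 * cheap power-of-two modulo:
 *
 *	(syncer_delayno + delay) & 31 == (syncer_delayno + delay) % 32
 */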

void
vn_initialize_syncerd()
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, M_WAITOK,
	    &syncer_mask);
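	/*
	 * hashinit() rounds the table size up to a power of two and
	 * returns the matching bit mask, so recompute the real number
	 * of queues from the mask.
	 */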
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(vp, delay)
	struct vnode *vp;
	int delay;
{
	int s, slot;

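	/*
	 * Clamp the delay so the target slot can never wrap around
	 * onto the slot that the syncer is currently draining.
	 */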
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	s = splbio();
	if (vp->v_bioflag & VBIOONSYNCLIST)
		LIST_REMOVE(vp, v_synclist);

	vp->v_bioflag |= VBIOONSYNCLIST;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	splx(s);
}

/*
 * System filesystem synchronizer daemon.
 */

void
sched_sync(p)
	struct proc *p;
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;

	syncerproc = curproc;

	for (;;) {
		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];

		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, p) != 0) {
				/*
				 * If we fail to get the lock, we move this
				 * vnode one second ahead in time.
				 * XXX - no good, but the best we can do.
				 */
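				/*
				 * Requeueing also removes the vnode from
				 * the slot we are draining, so LIST_FIRST()
				 * advances and we cannot spin forever on a
				 * single locked vnode.
				 */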
				vn_syncer_add_to_worklist(vp, 1);
				continue;
			}
			splx(s);
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			VOP_UNLOCK(vp, 0, p);
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: disk vnodes can remain on the
				 * worklist with no dirty blocks, but
				 * since sync_fsync() moves them to a
				 * different slot we are safe.
				 */
				if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
				    vp->v_type != VBLK) {
					vprint("fsync failed", vp);
					if (vp->v_mount != NULL)
						printf("mounted on: %s\n",
						    vp->v_mount->mnt_stat.f_mntonname);
					panic("sched_sync: fsync failed");
				}
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
		}

		splx(s);

#ifdef FFS_SOFTUPDATES
		/*
		 * Do soft update processing.
		 */
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * filesystem syncer process. A rushjob value of N tells
		 * the filesystem syncer to process the next N seconds'
		 * worth of work on its queue ASAP. Currently rushjob is
		 * used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer()
{
	int s;

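	/*
	 * If the syncer is asleep on lbolt waiting for the next second
	 * to tick over, kick it awake right away.
	 */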
	s = splhigh();
	if (syncerproc && syncerproc->p_wchan == &lbolt)
		setrunnable(syncerproc);
	splx(s);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return 1;
	}
	return 0;
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close nullop
int	sync_fsync(void *);
int	sync_inactive(void *);
#define sync_reclaim nullop
#define sync_lock vop_generic_lock
#define sync_unlock vop_generic_unlock
int	sync_print(void *);
#define sync_islocked vop_generic_islocked

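/*
 * Only fsync, inactive and print have real implementations here;
 * close and reclaim alias nullop, and the lock, unlock and islocked
 * operations fall back to the generic vnode lock routines.
 */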
int (**sync_vnodeop_p)(void *);
struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_close_desc, sync_close },		/* close */
	{ &vop_fsync_desc, sync_fsync },		/* fsync */
	{ &vop_inactive_desc, sync_inactive },		/* inactive */
	{ &vop_reclaim_desc, sync_reclaim },		/* reclaim */
	{ &vop_lock_desc, sync_lock },			/* lock */
	{ &vop_unlock_desc, sync_unlock },		/* unlock */
	{ &vop_print_desc, sync_print },		/* print */
	{ &vop_islocked_desc, sync_islocked },		/* islocked */
	{ (struct vnodeop_desc *)NULL, (int (*)(void *))NULL }
};
struct vnodeopv_desc sync_vnodeop_opv_desc = {
	&sync_vnodeop_p, sync_vnodeop_entries
};

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_writecount = 1;
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
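	/*
	 * The start/incr/next dance below walks a binary subdivision
	 * of the wheel: successive mounts get delays of maxdelay/2,
	 * maxdelay/4, 3*maxdelay/4, maxdelay/8, 3*maxdelay/8 and so
	 * on, spreading the sync vnodes evenly across the slots.
	 */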
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, next);
	mp->mnt_syncer = vp;
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 */
int
sync_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	simple_lock(&mountlist_slock);
	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p) == 0) {
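		/*
		 * Temporarily clear MNT_ASYNC so the writes queued by
		 * VFS_SYNC() are actually initiated instead of being
		 * deferred yet again.
		 */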
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
		vfs_unbusy(mp, ap->a_p);
	} else
		simple_unlock(&mountlist_slock);

	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
int
sync_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int s;

	if (vp->v_usecount == 0) {
		VOP_UNLOCK(vp, 0, ap->a_p);
		return (0);
	}

	vp->v_mount->mnt_syncer = NULL;

	s = splbio();

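	/*
	 * Pull the vnode off the syncer worklist so the syncer will
	 * not touch it again.
	 */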
	LIST_REMOVE(vp, v_synclist);
	vp->v_bioflag &= ~VBIOONSYNCLIST;

	splx(s);

	vp->v_writecount = 0;
	vput(vp);

	return (0);
}

/*
 * Print out a syncer vnode.
 */
int
sync_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}