/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;			/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;			/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;			/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;			/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
time_t retrydelay = 1;			/* retry delay after failure */
SYSCTL_INT(_kern, OID_AUTO, retrydelay, CTLFLAG_RW,
	   &retrydelay, 0, "VFS retry synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;	/* sequence vnodes faster */
	int			syncer_trigger;	/* trigger full sync */
	long			syncer_count;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return(0);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
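
/*
 * Worked example of the slot arithmetic above, assuming hashinit()
 * hands back a power-of-two table so that syncer_mask == SYNCER_MAXDELAY - 1
 * (31 with the default SYNCER_MAXDELAY of 32): if syncer_delayno is
 * currently 20 and a vnode is queued with a delay of 15 seconds, it is
 * inserted at slot (20 + 15) & 31 == 3.  The round-robin scan advances
 * one slot per second, so it wraps past slot 31 and reaches slot 3
 * fifteen ticks later, which is when the vnode is pushed.
 */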

/*
 * Return the number of vnodes on the syncer's timed list.  This will
 * include the syncer vnode (mp->mnt_syncer), so if one is in use a
 * minimum value of 1 will be returned.
 */
long
vn_syncer_count(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx)
		return (ctx->syncer_count);
	return 0;
}

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * WARNING: The syncer depends on this function not blocking if the caller
 *	    already holds the syncer token.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);
	++ctx->syncer_count;

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
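
/*
 * Sketch of how the entry points above are typically used, assuming the
 * caller holds a stable reference on vp: a VFS write path calls
 * vsetisdirty(vp), which places the vnode on the worklist roughly
 * syncdelay (30) seconds out, and vclrisdirty(vp) clears the flag and
 * takes the vnode back off the worklist once no dirty buffers remain.
 *
 * A delay of <= 0 passed to vn_syncer_add() is treated as an absolute
 * slot index (-delay & syncer_mask) rather than a relative delay;
 * vsyncscan() below uses this form to requeue a vnode at slot
 * (i + syncdelay) & syncer_mask, safely ahead of the bucket it is
 * currently draining.
 */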

/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Note that vsyncscan() and vn_syncer_one() can pull items
		 * off the same list, so we shift vp's position in the
		 * list immediately.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];

		/*
		 * If syncer_trigger is set (from trigger_syncer(mp)),
		 * immediately do a full filesystem sync.
		 */
		if (ctx->syncer_trigger) {
			ctx->syncer_trigger = 0;
			if (ctx->sc_mp && ctx->sc_mp->mnt_syncer) {
				vp = ctx->sc_mp->mnt_syncer;
				if (vp->v_flag & VONWORKLST) {
					vn_syncer_add(vp, retrydelay);
					if (vget(vp, LK_EXCLUSIVE) == 0) {
						VOP_FSYNC(vp, MNT_LAZY, 0);
						vput(vp);
						vnodes_synced++;
					}
				}
			}
		}

		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_syncer_add(vp, retrydelay);
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}
		}

		/*
		 * Increment the slot upon completion.
		 */
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * This allows a filesystem to pro-actively request that a dirty
 * vnode be fsync()d.  This routine does not guarantee that one
 * will actually be fsynced.
 */
void
vn_syncer_one(struct mount *mp)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int n = syncdelay;

	ctx = mp->mnt_syncer_ctx;
	i = ctx->syncer_delayno & ctx->syncer_mask;
	cpu_ccfence();

	if (lwkt_trytoken(&ctx->sc_token) == 0)
		return;

	/*
	 * Look ahead on our syncer time array.
	 */
	do {
		slp = &ctx->syncer_workitem_pending[i];
		vp = LIST_FIRST(slp);
		if (vp && vp->v_type == VNON)
			vp = LIST_NEXT(vp, v_synclist);
		if (vp)
			break;
		i = (i + 1) & ctx->syncer_mask;
		/* i will be wrong if we stop here but vp is NULL so ok */
	} while (--n);

	/*
	 * Process one vnode, skip the syncer vnode but also stop
	 * if the syncer vnode is the only thing on this list.
	 */
	if (vp) {
		vn_syncer_add(vp, retrydelay);
		if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(vp, MNT_LAZY, 0);
			vput(vp);
		}
	}
	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  unsleep_and_wakeup_thread()
	 * will only do something real if the thread is in the right state.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp && mp->mnt_syncer_ctx)
		wakeup(mp->mnt_syncer_ctx);
}

/*
 * Trigger a full sync.
 */
void
trigger_syncer(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		if (ctx->syncer_trigger == 0) {
			ctx->syncer_trigger = 1;
			wakeup(ctx);
		}
	}
}
576 */ 577 static int sync_close(struct vop_close_args *); 578 static int sync_fsync(struct vop_fsync_args *); 579 static int sync_inactive(struct vop_inactive_args *); 580 static int sync_reclaim (struct vop_reclaim_args *); 581 static int sync_print(struct vop_print_args *); 582 583 static struct vop_ops sync_vnode_vops = { 584 .vop_default = vop_eopnotsupp, 585 .vop_close = sync_close, 586 .vop_fsync = sync_fsync, 587 .vop_inactive = sync_inactive, 588 .vop_reclaim = sync_reclaim, 589 .vop_print = sync_print, 590 }; 591 592 static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops; 593 594 VNODEOP_SET(sync_vnode_vops); 595 596 /* 597 * Create a new filesystem syncer vnode for the specified mount point. 598 * This vnode is placed on the worklist and is responsible for sync'ing 599 * the filesystem. 600 * 601 * NOTE: read-only mounts are also placed on the worklist. The filesystem 602 * sync code is also responsible for cleaning up vnodes. 603 */ 604 int 605 vfs_allocate_syncvnode(struct mount *mp) 606 { 607 struct vnode *vp; 608 static long start, incr, next; 609 int error; 610 611 /* Allocate a new vnode */ 612 error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0); 613 if (error) { 614 mp->mnt_syncer = NULL; 615 return (error); 616 } 617 vp->v_type = VNON; 618 /* 619 * Place the vnode onto the syncer worklist. We attempt to 620 * scatter them about on the list so that they will go off 621 * at evenly distributed times even if all the filesystems 622 * are mounted at once. 623 */ 624 next += incr; 625 if (next == 0 || next > SYNCER_MAXDELAY) { 626 start /= 2; 627 incr /= 2; 628 if (start == 0) { 629 start = SYNCER_MAXDELAY / 2; 630 incr = SYNCER_MAXDELAY; 631 } 632 next = start; 633 } 634 635 /* 636 * Only put the syncer vnode onto the syncer list if we have a 637 * syncer thread. Some VFS's (aka NULLFS) don't need a syncer 638 * thread. 639 */ 640 if (mp->mnt_syncer_ctx) 641 vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0); 642 643 /* 644 * The mnt_syncer field inherits the vnode reference, which is 645 * held until later decomissioning. 646 */ 647 mp->mnt_syncer = vp; 648 vx_unlock(vp); 649 return (0); 650 } 651 652 static int 653 sync_close(struct vop_close_args *ap) 654 { 655 return (0); 656 } 657 658 /* 659 * Do a lazy sync of the filesystem. 660 * 661 * sync_fsync { struct vnode *a_vp, int a_waitfor } 662 */ 663 static int 664 sync_fsync(struct vop_fsync_args *ap) 665 { 666 struct vnode *syncvp = ap->a_vp; 667 struct mount *mp = syncvp->v_mount; 668 int asyncflag; 669 670 /* 671 * We only need to do something if this is a lazy evaluation. 672 */ 673 if ((ap->a_waitfor & MNT_LAZY) == 0) 674 return (0); 675 676 /* 677 * Move ourselves to the back of the sync list. 678 */ 679 vn_syncer_add(syncvp, syncdelay); 680 681 /* 682 * Walk the list of vnodes pushing all that are dirty and 683 * not already on the sync list, and freeing vnodes which have 684 * no refs and whos VM objects are empty. vfs_msync() handles 685 * the VM issues and must be called whether the mount is readonly 686 * or not. 687 */ 688 if (vfs_busy(mp, LK_NOWAIT) != 0) 689 return (0); 690 if (mp->mnt_flag & MNT_RDONLY) { 691 vfs_msync(mp, MNT_NOWAIT); 692 } else { 693 asyncflag = mp->mnt_flag & MNT_ASYNC; 694 mp->mnt_flag &= ~MNT_ASYNC; /* ZZZ hack */ 695 vfs_msync(mp, MNT_NOWAIT); 696 VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY); 697 if (asyncflag) 698 mp->mnt_flag |= MNT_ASYNC; 699 } 700 vfs_unbusy(mp); 701 return (0); 702 } 703 704 /* 705 * The syncer vnode is no longer referenced. 
706 * 707 * sync_inactive { struct vnode *a_vp, struct proc *a_p } 708 */ 709 static int 710 sync_inactive(struct vop_inactive_args *ap) 711 { 712 vgone_vxlocked(ap->a_vp); 713 return (0); 714 } 715 716 /* 717 * The syncer vnode is no longer needed and is being decommissioned. 718 * This can only occur when the last reference has been released on 719 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL. 720 * 721 * Modifications to the worklist must be protected with a critical 722 * section. 723 * 724 * sync_reclaim { struct vnode *a_vp } 725 */ 726 static int 727 sync_reclaim(struct vop_reclaim_args *ap) 728 { 729 struct vnode *vp = ap->a_vp; 730 struct syncer_ctx *ctx; 731 732 ctx = vp->v_mount->mnt_syncer_ctx; 733 if (ctx) { 734 lwkt_gettoken(&ctx->sc_token); 735 KKASSERT(vp->v_mount->mnt_syncer != vp); 736 if (vp->v_flag & VONWORKLST) { 737 LIST_REMOVE(vp, v_synclist); 738 vclrflags(vp, VONWORKLST); 739 --ctx->syncer_count; 740 } 741 lwkt_reltoken(&ctx->sc_token); 742 } else { 743 KKASSERT((vp->v_flag & VONWORKLST) == 0); 744 } 745 746 return (0); 747 } 748 749 /* 750 * This is very similar to vmntvnodescan() but it only scans the 751 * vnodes on the syncer list. VFS's which support faster VFS_SYNC 752 * operations use the VISDIRTY flag on the vnode to ensure that vnodes 753 * with dirty inodes are added to the syncer in addition to vnodes 754 * with dirty buffers, and can use this function instead of nmntvnodescan(). 755 * 756 * This scan does not issue VOP_FSYNC()s. The supplied callback is intended 757 * to synchronize the file in the manner intended by the VFS using it. 758 * 759 * This is important when a system has millions of vnodes. 760 */ 761 int 762 vsyncscan( 763 struct mount *mp, 764 int vmsc_flags, 765 int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data), 766 void *data 767 ) { 768 struct syncer_ctx *ctx; 769 struct synclist *slp; 770 struct vnode *vp; 771 int i; 772 int count; 773 int lkflags; 774 775 if (vmsc_flags & VMSC_NOWAIT) 776 lkflags = LK_NOWAIT; 777 else 778 lkflags = 0; 779 780 /* 781 * Syncer list context. This API requires a dedicated syncer thread. 782 * (MNTK_THR_SYNC). 783 */ 784 KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC); 785 ctx = mp->mnt_syncer_ctx; 786 lwkt_gettoken(&ctx->sc_token); 787 788 /* 789 * Setup for loop. Allow races against the syncer thread but 790 * require that the syncer thread no be lazy if we were told 791 * not to be lazy. 792 */ 793 i = ctx->syncer_delayno & ctx->syncer_mask; 794 if ((vmsc_flags & VMSC_NOWAIT) == 0) 795 ++ctx->syncer_forced; 796 for (count = 0; count <= ctx->syncer_mask; ++count) { 797 slp = &ctx->syncer_workitem_pending[i]; 798 799 while ((vp = LIST_FIRST(slp)) != NULL) { 800 KKASSERT(vp->v_mount == mp); 801 if (vmsc_flags & VMSC_GETVP) { 802 if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) { 803 slowfunc(mp, vp, data); 804 vput(vp); 805 } 806 } else if (vmsc_flags & VMSC_GETVX) { 807 vx_get(vp); 808 slowfunc(mp, vp, data); 809 vx_put(vp); 810 } else { 811 vhold(vp); 812 slowfunc(mp, vp, data); 813 vdrop(vp); 814 } 815 816 /* 817 * vp could be invalid. However, if vp is still at 818 * the head of the list it is clearly valid and we 819 * can safely move it. 820 */ 821 if (LIST_FIRST(slp) == vp) 822 vn_syncer_add(vp, -(i + syncdelay)); 823 } 824 i = (i + 1) & ctx->syncer_mask; 825 } 826 827 if ((vmsc_flags & VMSC_NOWAIT) == 0) 828 --ctx->syncer_forced; 829 lwkt_reltoken(&ctx->sc_token); 830 return(0); 831 } 832 833 /* 834 * Print out a syncer vnode. 
835 * 836 * sync_print { struct vnode *a_vp } 837 */ 838 static int 839 sync_print(struct vop_print_args *ap) 840 { 841 struct vnode *vp = ap->a_vp; 842 843 kprintf("syncer vnode"); 844 lockmgr_printinfo(&vp->v_lock); 845 kprintf("\n"); 846 return (0); 847 } 848 849