/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
        sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
        &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
        &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
        &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
        &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define SC_FLAG_EXIT		(0x1)	/* request syncer exit */
#define SC_FLAG_DONE		(0x2)	/* syncer confirm exit */
#define SC_FLAG_BIOOPS_ALL	(0x4)	/* do bio_ops_sync(NULL) */

struct syncer_ctx {
        struct mount *sc_mp;
        struct lwkt_token sc_token;
        struct thread *sc_thread;
        int sc_flags;

        struct synclist *syncer_workitem_pending;
        long syncer_mask;
        int syncer_delayno;
        int syncer_forced;
};

static struct syncer_ctx syncer_ctx0;

static void syncer_thread(void *);

static void
syncer_ctx_init(struct syncer_ctx *ctx, struct mount *mp)
{
        ctx->sc_mp = mp;
        ctx->sc_flags = 0;
        ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
                                                &ctx->syncer_mask);
        ctx->syncer_delayno = 0;
        lwkt_token_init(&ctx->sc_token, "syncer");
}

/*
 * Called from vfsinit()
 */
void
vfs_sync_init(void)
{
        syncer_ctx_init(&syncer_ctx0, NULL);
        syncer_ctx0.sc_flags |= SC_FLAG_BIOOPS_ALL;

        /* Support schedcpu wakeup of syncer0 */
        lbolt_syncer = &syncer_ctx0;
}

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
        int error;
        int v = syncdelay;

        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error || !req->newptr)
                return (error);
        if (v < 1)
                v = 1;
        if (v > SYNCER_MAXDELAY)
                v = SYNCER_MAXDELAY;
        syncdelay = v;

        return(0);
}

/*
 * Return the syncer context responsible for a vnode: the per-mount
 * context if the vnode belongs to a mount, otherwise syncer0.
 */
static struct syncer_ctx *
vn_get_syncer(struct vnode *vp)
{
        struct mount *mp;
        struct syncer_ctx *ctx;

        if ((mp = vp->v_mount) != NULL)
                ctx = mp->mnt_syncer_ctx;
        else
                ctx = &syncer_ctx0;
        return (ctx);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held; we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
        struct syncer_ctx *ctx;
        int slot;

        ctx = vn_get_syncer(vp);

        lwkt_gettoken(&ctx->sc_token);

        if (vp->v_flag & VONWORKLST)
                LIST_REMOVE(vp, v_synclist);
        if (delay <= 0) {
                slot = -delay & ctx->syncer_mask;
        } else {
                if (delay > SYNCER_MAXDELAY - 2)
                        delay = SYNCER_MAXDELAY - 2;
                slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
        }

        LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
        vsetflags(vp, VONWORKLST);

        lwkt_reltoken(&ctx->sc_token);
}

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
        struct syncer_ctx *ctx;

        ctx = vn_get_syncer(vp);

        lwkt_gettoken(&ctx->sc_token);

        if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
            RB_EMPTY(&vp->v_rbdirty_tree)) {
                vclrflags(vp, VONWORKLST);
                LIST_REMOVE(vp, v_synclist);
        }

        lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
        vclrflags(vp, VISDIRTY);
        if (vp->v_flag & VONWORKLST)
                vn_syncer_remove(vp);
}

void
vclrobjdirty(struct vnode *vp)
{
        vclrflags(vp, VOBJDIRTY);
        if (vp->v_flag & VONWORKLST)
                vn_syncer_remove(vp);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
        struct syncer_ctx *ctx;

        if ((vp->v_flag & VISDIRTY) == 0) {
                ctx = vn_get_syncer(vp);
                vsetflags(vp, VISDIRTY);
                lwkt_gettoken(&ctx->sc_token);
                if ((vp->v_flag & VONWORKLST) == 0)
                        vn_syncer_add(vp, syncdelay);
                lwkt_reltoken(&ctx->sc_token);
        }
}

void
vsetobjdirty(struct vnode *vp)
{
        struct syncer_ctx *ctx;

        if ((vp->v_flag & VOBJDIRTY) == 0) {
                ctx = vn_get_syncer(vp);
                vsetflags(vp, VOBJDIRTY);
                lwkt_gettoken(&ctx->sc_token);
                if ((vp->v_flag & VONWORKLST) == 0)
                        vn_syncer_add(vp, syncdelay);
                lwkt_reltoken(&ctx->sc_token);
        }
}

/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
        struct syncer_ctx *ctx;
        static int syncalloc = 0;
        int rc;

        if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
                ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP,
                              M_WAITOK | M_ZERO);
                syncer_ctx_init(ctx, mp);
                mp->mnt_syncer_ctx = ctx;
                rc = kthread_create(syncer_thread, ctx, &ctx->sc_thread,
                                    "syncer%d", ++syncalloc);
        } else {
                mp->mnt_syncer_ctx = &syncer_ctx0;
        }
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
        struct syncer_ctx *ctx;

        ctx = mp->mnt_syncer_ctx;
        if (ctx == NULL || ctx == &syncer_ctx0)
                return;
        KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);

        lwkt_gettoken(&ctx->sc_token);

        /* Signal the syncer process to exit */
        ctx->sc_flags |= SC_FLAG_EXIT;
        wakeup(ctx);

        /* Wait till the syncer process exits */
        while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
                tsleep(&ctx->sc_flags, 0, "syncexit", hz);

        mp->mnt_syncer_ctx = NULL;
        lwkt_reltoken(&ctx->sc_token);

        hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
        kfree(ctx, M_TEMP);
}

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
        struct thread *td = curthread;
        struct syncer_ctx *ctx = _ctx;
        struct synclist *slp;
        struct vnode *vp;
        long starttime;
        int *sc_flagsp;
        int sc_flags;
        int vnodes_synced = 0;

        /*
         * syncer0 runs till system shutdown; per-filesystem syncers are
         * terminated on filesystem unmount.
         */
        if (ctx == &syncer_ctx0)
                EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                                      SHUTDOWN_PRI_LAST);
        for (;;) {
                kproc_suspend_loop();

                starttime = time_uptime;
                lwkt_gettoken(&ctx->sc_token);

                /*
                 * Push files whose dirty time has expired.  Be careful
                 * of interrupt race on slp queue.
                 */
                slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
                ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
                                      ctx->syncer_mask;

                while ((vp = LIST_FIRST(slp)) != NULL) {
                        if (ctx->syncer_forced) {
                                if (vget(vp, LK_EXCLUSIVE) == 0) {
                                        VOP_FSYNC(vp, MNT_NOWAIT, 0);
                                        vput(vp);
                                        vnodes_synced++;
                                }
                        } else {
                                if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
                                        VOP_FSYNC(vp, MNT_LAZY, 0);
                                        vput(vp);
                                        vnodes_synced++;
                                }
                        }

                        /*
                         * vp is stale but can still be used if we can
                         * verify that it remains at the head of the list.
                         * Be careful not to try to get vp->v_token as
                         * vp can become stale if this blocks.
                         *
                         * If the vp is still at the head of the list we
                         * were unable to completely flush it, so move it
                         * to a later slot to give other vnodes a fair shot.
                         *
                         * Note that v_tag VT_VFS vnodes can remain on the
                         * worklist with no dirty blocks, but sync_fsync()
                         * moves them to a later slot so we will never see
                         * them here.
                         *
                         * It is possible to race a vnode with no dirty
                         * buffers being removed from the list.  If this
                         * occurs we will move the vnode in the synclist
                         * and then the other thread will remove it.  Do
                         * not try to remove it here.
                         */
                        if (LIST_FIRST(slp) == vp)
                                vn_syncer_add(vp, syncdelay);
                }

                sc_flags = ctx->sc_flags;

                /* Exit on unmount */
                if (sc_flags & SC_FLAG_EXIT)
                        break;

                lwkt_reltoken(&ctx->sc_token);

                /*
                 * Do sync processing for each mount.
                 */
                if (ctx->sc_mp || sc_flags & SC_FLAG_BIOOPS_ALL)
                        bio_ops_sync(ctx->sc_mp);

                /*
                 * The variable rushjob allows the kernel to speed up the
                 * processing of the filesystem syncer process.  A rushjob
                 * value of N tells the filesystem syncer to process the
                 * next N seconds worth of work on its queue ASAP.
                 * Currently rushjob is used by the soft update code to
                 * speed up the filesystem syncer process when the incore
                 * state is getting so far ahead of the disk that the
                 * kernel memory pool is being threatened with exhaustion.
                 */
                if (ctx == &syncer_ctx0 && rushjob > 0) {
                        atomic_subtract_int(&rushjob, 1);
                        continue;
                }

                /*
                 * If it has taken us less than a second to process the
                 * current work, then wait.  Otherwise start right over
                 * again.  We can still lose time if any single round
                 * takes more than two seconds, but it does not really
                 * matter as we are just trying to generally pace the
                 * filesystem activity.
                 */
                if (time_uptime == starttime)
                        tsleep(ctx, 0, "syncer", hz);
        }

        /*
         * Unmount/exit path for per-filesystem syncers; sc_token held
         */
        ctx->sc_flags |= SC_FLAG_DONE;
        sc_flagsp = &ctx->sc_flags;
        lwkt_reltoken(&ctx->sc_token);
        wakeup(sc_flagsp);

        kthread_exit();
}

static void
syncer_thread_start(void)
{
        syncer_thread(&syncer_ctx0);
}

static struct kproc_desc up_kp = {
        "syncer0",
        syncer_thread_start,
        &updatethread
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time; otherwise it could take over the cpu.
 */
int
speedup_syncer(void)
{
        /*
         * Don't bother protecting the test.  unsleep_and_wakeup_thread()
         * will only do something real if the thread is in the right state.
         */
        wakeup(lbolt_syncer);
        if (rushjob < syncdelay / 2) {
                atomic_add_int(&rushjob, 1);
                stat_rush_requests += 1;
                return (1);
        }
        return(0);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
        .vop_default =	vop_eopnotsupp,
        .vop_close =	sync_close,
        .vop_fsync =	sync_fsync,
        .vop_inactive =	sync_inactive,
        .vop_reclaim =	sync_reclaim,
        .vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 *	 sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
        struct vnode *vp;
        static long start, incr, next;
        int error;

        /* Allocate a new vnode */
        error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
        if (error) {
                mp->mnt_syncer = NULL;
                return (error);
        }
        vp->v_type = VNON;

        /*
         * Place the vnode onto the syncer worklist.  We attempt to
         * scatter them about on the list so that they will go off
         * at evenly distributed times even if all the filesystems
         * are mounted at once.
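         *
         * For example, with SYNCER_MAXDELAY == 32 the delays handed out
         * by the arithmetic below work out to roughly 16, 8, 24, 4, 12,
         * 20, 28, 2, ... so each new syncer vnode approximately bisects
         * the largest remaining gap in the one-second schedule.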
         */
        next += incr;
        if (next == 0 || next > SYNCER_MAXDELAY) {
                start /= 2;
                incr /= 2;
                if (start == 0) {
                        start = SYNCER_MAXDELAY / 2;
                        incr = SYNCER_MAXDELAY;
                }
                next = start;
        }
        vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

        /*
         * The mnt_syncer field inherits the vnode reference, which is
         * held until later decommissioning.
         */
        mp->mnt_syncer = vp;
        vx_unlock(vp);
        return (0);
}

static int
sync_close(struct vop_close_args *ap)
{
        return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
        struct vnode *syncvp = ap->a_vp;
        struct mount *mp = syncvp->v_mount;
        int asyncflag;

        /*
         * We only need to do something if this is a lazy evaluation.
         */
        if ((ap->a_waitfor & MNT_LAZY) == 0)
                return (0);

        /*
         * Move ourselves to the back of the sync list.
         */
        vn_syncer_add(syncvp, syncdelay);

        /*
         * Walk the list of vnodes pushing all that are dirty and
         * not already on the sync list, and freeing vnodes which have
         * no refs and whose VM objects are empty.  vfs_msync() handles
         * the VM issues and must be called whether the mount is readonly
         * or not.
         */
        if (vfs_busy(mp, LK_NOWAIT) != 0)
                return (0);
        if (mp->mnt_flag & MNT_RDONLY) {
                vfs_msync(mp, MNT_NOWAIT);
        } else {
                asyncflag = mp->mnt_flag & MNT_ASYNC;
                mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
                vfs_msync(mp, MNT_NOWAIT);
                VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
                if (asyncflag)
                        mp->mnt_flag |= MNT_ASYNC;
        }
        vfs_unbusy(mp);
        return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
        vgone_vxlocked(ap->a_vp);
        return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct syncer_ctx *ctx;

        ctx = vn_get_syncer(vp);

        lwkt_gettoken(&ctx->sc_token);
        KKASSERT(vp->v_mount->mnt_syncer != vp);
        if (vp->v_flag & VONWORKLST) {
                LIST_REMOVE(vp, v_synclist);
                vclrflags(vp, VONWORKLST);
        }
        lwkt_reltoken(&ctx->sc_token);

        return (0);
}

/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This is important when a system has millions of vnodes.
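 *
 * A VFS that runs a dedicated syncer thread (MNTK_THR_SYNC) might, for
 * example, drive its VFS_SYNC scan with a call along the lines of:
 *
 *	vsyncscan(mp, VMSC_GETVP | VMSC_NOWAIT, myfs_sync_scan, &info);
 *
 * where myfs_sync_scan and info are hypothetical names; the callback is
 * handed (mp, vp, data) for each vnode currently on the syncer worklist.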
 */
int
vsyncscan(struct mount *mp, int vmsc_flags,
          int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
          void *data)
{
        struct syncer_ctx *ctx;
        struct synclist *slp;
        struct vnode *vp;
        int b;
        int i;
        int lkflags;

        if (vmsc_flags & VMSC_NOWAIT)
                lkflags = LK_NOWAIT;
        else
                lkflags = 0;

        /*
         * Syncer list context.  This API requires a dedicated syncer
         * thread (MNTK_THR_SYNC).
         */
        KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
        ctx = mp->mnt_syncer_ctx;
        KKASSERT(ctx != &syncer_ctx0);

        lwkt_gettoken(&ctx->sc_token);

        /*
         * Set up for the loop.  Allow races against the syncer thread but
         * require that the syncer thread not be lazy if we were told not
         * to be lazy.
         */
        b = ctx->syncer_delayno & ctx->syncer_mask;
        i = b;
        if ((vmsc_flags & VMSC_NOWAIT) == 0)
                ++ctx->syncer_forced;

        do {
                slp = &ctx->syncer_workitem_pending[i];

                while ((vp = LIST_FIRST(slp)) != NULL) {
                        KKASSERT(vp->v_mount == mp);
                        if (vmsc_flags & VMSC_GETVP) {
                                if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
                                        slowfunc(mp, vp, data);
                                        vput(vp);
                                }
                        } else if (vmsc_flags & VMSC_GETVX) {
                                vx_get(vp);
                                slowfunc(mp, vp, data);
                                vx_put(vp);
                        } else {
                                vhold(vp);
                                slowfunc(mp, vp, data);
                                vdrop(vp);
                        }
                        if (LIST_FIRST(slp) == vp)
                                vn_syncer_add(vp, -(i + syncdelay));
                }
                i = (i + 1) & ctx->syncer_mask;
        } while (i != b);

        if ((vmsc_flags & VMSC_NOWAIT) == 0)
                --ctx->syncer_forced;
        lwkt_reltoken(&ctx->sc_token);
        return(0);
}

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
        struct vnode *vp = ap->a_vp;

        kprintf("syncer vnode");
        lockmgr_printinfo(&vp->v_lock);
        kprintf("\n");
        return (0);
}