/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	8.7 (Berkeley) 06/19/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

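/*
 * Default free-memory thresholds, used unless overridden at compile time
 * or by machine-dependent code: keep at least 1/20 of memory free, and
 * target 4/3 of the minimum.  cnt.v_free_min is set up in vm_pageout().
 */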
#ifndef VM_PAGE_FREE_MIN
#define VM_PAGE_FREE_MIN	(cnt.v_free_count / 20)
#endif

#ifndef VM_PAGE_FREE_TARGET
#define VM_PAGE_FREE_TARGET	((cnt.v_free_min * 4) / 3)
#endif

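/*
 * Bounds on cnt.v_free_min, expressed in bytes here and converted to
 * pages in vm_pageout() below.
 */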
int	vm_page_free_min_min = 16 * 1024;
int	vm_page_free_min_max = 256 * 1024;

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

#ifdef CLUSTERED_PAGEOUT
#define MAXPOCLUSTER	(MAXPHYS/NBPG)	/* XXX */
int doclustered_pageout = 1;
#endif

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t m, next;
	register int page_shortage;
	register int s;
	register int pages_freed;
	int free;
	vm_object_t object;

	/*
	 * Only continue when we want more pages to be "free"
	 */

	cnt.v_rev++;

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		if (free >= cnt.v_free_target)
			break;

		cnt.v_scan++;
		next = m->pageq.tqe_next;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			cnt.v_reactivated++;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 */
		if (m->flags & PG_CLEAN) {
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				cnt.v_dfree++;
				vm_object_unlock(object);
			}
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0)
			continue;

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 */
		object = m->object;
		if (!vm_object_lock_try(object))
			continue;
		cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
		if (object->pager &&
		    vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
			vm_pageout_cluster(m, object);
		else
#endif
			vm_pageout_page(m, object);
		thread_wakeup(object);
		vm_object_unlock(object);
		/*
		 * Former next page may no longer even be on the inactive
		 * queue (due to potential blocking in the pager with the
		 * queues unlocked).  If it isn't, we just start over.
		 */
		if (next && (next->flags & PG_INACTIVE) == 0)
			next = vm_page_queue_inactive.tqh_first;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal number of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if ((m = vm_page_queue_active.tqh_first) == NULL)
			break;
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 * Called with object and page queues locked.
 * On a pager error, the page is reactivated so that it does not
 * clog the inactive queue; otherwise it is left on the inactive queue.
 */
void
vm_pageout_page(m, object)
	vm_page_t m;
	vm_object_t object;
{
	vm_pager_t pager;
	int pageout_status;

	/*
	 * We set the busy bit to cause potential page faults on
	 * this page to block.
	 *
	 * We also set pageout-in-progress to keep the object from
	 * disappearing during pageout.  This guarantees that the
	 * page won't move from the inactive queue.  (However, any
	 * other page on the inactive queue may move!)
	 */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
	m->flags |= PG_BUSY;

	/*
	 * Try to collapse the object before making a pager for it.
	 * We must unlock the page queues first.
	 */
	vm_page_unlock_queues();
	if (object->pager == NULL)
		vm_object_collapse(object);

	object->paging_in_progress++;
	vm_object_unlock(object);

	/*
	 * Do a wakeup here in case the following operations block.
	 */
	thread_wakeup(&cnt.v_free_count);

	/*
	 * If there is no pager for the page, use the default pager.
	 * If there is no place to put the page at the moment,
	 * leave it in the laundry and hope that there will be
	 * paging space later.
	 */
	if ((pager = object->pager) == NULL) {
		pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, object->size,
					  VM_PROT_ALL, (vm_offset_t)0);
		if (pager != NULL)
			vm_object_setpager(object, pager, 0, FALSE);
	}
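	/*
	 * With no pager, the put fails below; the page keeps PG_LAUNDRY
	 * and is reactivated, so it will be retried on a later scan.
	 */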
	pageout_status = pager ? vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
	vm_object_lock(object);
	vm_page_lock_queues();

	switch (pageout_status) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:
		cnt.v_pgpgout++;
		m->flags &= ~PG_LAUNDRY;
		break;
	case VM_PAGER_BAD:
		/*
		 * Page outside of range of object.  Right now we
		 * essentially lose the changes by pretending it
		 * worked.
		 *
		 * XXX dubious, what should we do?
		 */
		m->flags &= ~PG_LAUNDRY;
		m->flags |= PG_CLEAN;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
		break;
	case VM_PAGER_AGAIN:
	{
		extern int lbolt;

		/*
		 * AGAIN on a write is interpreted to mean a transient
		 * resource shortage, so we pause for a while and try
		 * again.  XXX could get stuck here.
		 */
		vm_page_unlock_queues();
		vm_object_unlock(object);
		(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
		vm_object_lock(object);
		vm_page_lock_queues();
		break;
	}
	case VM_PAGER_FAIL:
	case VM_PAGER_ERROR:
		/*
		 * If the page couldn't be paged out, reactivate it
		 * so it doesn't clog the inactive list.  (We will
		 * try paging it out again later.)
		 */
		vm_page_activate(m);
		cnt.v_reactivated++;
		break;
	}
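	/*
	 * Clear the reference bit so that, if the page stays on the
	 * inactive queue, the next scan gets a fresh indication of use.
	 */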
	pmap_clear_reference(VM_PAGE_TO_PHYS(m));

	/*
	 * If the operation is still going, leave the page busy
	 * to block all other accesses.  Also, leave the paging
	 * in progress indicator set so that we don't attempt an
	 * object collapse.
	 */
	if (pageout_status != VM_PAGER_PEND) {
		m->flags &= ~PG_BUSY;
		PAGE_WAKEUP(m);
		object->paging_in_progress--;
	}
}

#ifdef CLUSTERED_PAGEOUT
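/*
 * A page may join a cluster only if it is inactive, dirty (not PG_CLEAN
 * but still in the laundry), and has not been referenced recently.
 */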
#define PAGEOUTABLE(p) \
	((((p)->flags & (PG_INACTIVE|PG_CLEAN|PG_LAUNDRY)) == \
	  (PG_INACTIVE|PG_LAUNDRY)) && !pmap_is_referenced(VM_PAGE_TO_PHYS(p)))

/*
 * Attempt to pageout as many contiguous (to ``m'') dirty pages as possible
 * from ``object''.  Using information returned from the pager, we assemble
 * a sorted list of contiguous dirty pages and feed them to the pager in one
 * chunk.  Called with paging queues and object locked.  Also, object must
 * already have a pager.
 */
void
vm_pageout_cluster(m, object)
	vm_page_t m;
	vm_object_t object;
{
	vm_offset_t offset, loff, hoff;
	vm_page_t plist[MAXPOCLUSTER], *plistp, p;
	int postatus, ix, count;

	/*
	 * Determine the range of pages that can be part of a cluster
	 * for this object/offset.  If it is only our single page, just
	 * do it normally.
	 */
	vm_pager_cluster(object->pager, m->offset, &loff, &hoff);
	if (hoff - loff == PAGE_SIZE) {
		vm_pageout_page(m, object);
		return;
	}

	plistp = plist;

	/*
	 * Target page is always part of the cluster.
	 */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
	m->flags |= PG_BUSY;
	plistp[atop(m->offset - loff)] = m;
	count = 1;

	/*
	 * Back up from the given page until we find one not fulfilling
	 * the pageout criteria or we hit the lower bound for the
	 * cluster.  For each page determined to be part of the
	 * cluster, unmap it and busy it out so it won't change.
	 */
	ix = atop(m->offset - loff);
	offset = m->offset;
	while (offset > loff && count < MAXPOCLUSTER-1) {
		p = vm_page_lookup(object, offset - PAGE_SIZE);
		if (p == NULL || !PAGEOUTABLE(p))
			break;
		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
		p->flags |= PG_BUSY;
		plistp[--ix] = p;
		offset -= PAGE_SIZE;
		count++;
	}
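	/*
	 * Slide plistp forward so that plistp[0] names the page at the
	 * lowest offset actually included in the cluster.
	 */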
	plistp += atop(offset - loff);
	loff = offset;

	/*
	 * Now do the same moving forward from the target.
	 */
	ix = atop(m->offset - loff) + 1;
	offset = m->offset + PAGE_SIZE;
	while (offset < hoff && count < MAXPOCLUSTER) {
		p = vm_page_lookup(object, offset);
		if (p == NULL || !PAGEOUTABLE(p))
			break;
		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
		p->flags |= PG_BUSY;
		plistp[ix++] = p;
		offset += PAGE_SIZE;
		count++;
	}
	hoff = offset;

	/*
	 * Pageout the page.
	 * Unlock everything and do a wakeup prior to the pager call
	 * in case it blocks.
	 */
	vm_page_unlock_queues();
	object->paging_in_progress++;
	vm_object_unlock(object);
again:
	thread_wakeup(&cnt.v_free_count);
	postatus = vm_pager_put_pages(object->pager, plistp, count, FALSE);
	/*
	 * XXX rethink this
	 */
	if (postatus == VM_PAGER_AGAIN) {
		extern int lbolt;

		(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
		goto again;
	} else if (postatus == VM_PAGER_BAD)
		panic("vm_pageout_cluster: VM_PAGER_BAD");
	vm_object_lock(object);
	vm_page_lock_queues();

	/*
	 * Loop through the affected pages, reflecting the outcome of
	 * the operation.
	 */
	for (ix = 0; ix < count; ix++) {
		p = *plistp++;
		switch (postatus) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			cnt.v_pgpgout++;
			p->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * Pageout failed, reactivate the target page so it
			 * doesn't clog the inactive list.  Other pages are
			 * left as they are.
			 */
			if (p == m) {
				vm_page_activate(p);
				cnt.v_reactivated++;
			}
			break;
		}
		pmap_clear_reference(VM_PAGE_TO_PHYS(p));
		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.
		 */
		if (postatus != VM_PAGER_PEND) {
			p->flags &= ~PG_BUSY;
			PAGE_WAKEUP(p);
		}
	}
	/*
	 * If the operation is still going, leave the paging in progress
	 * indicator set so that we don't attempt an object collapse.
	 */
	if (postatus != VM_PAGER_PEND)
		object->paging_in_progress--;
}
#endif

/*
 * vm_pageout is the high level pageout daemon.
 */

void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

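	/*
	 * If machine-dependent code has not set v_free_min, derive it
	 * from VM_PAGE_FREE_MIN and clamp it between the byte-based
	 * bounds above (converted here from bytes to pages).
	 */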
	if (cnt.v_free_min == 0) {
		cnt.v_free_min = VM_PAGE_FREE_MIN;
		vm_page_free_min_min /= cnt.v_page_size;
		vm_page_free_min_max /= cnt.v_page_size;
		if (cnt.v_free_min < vm_page_free_min_min)
			cnt.v_free_min = vm_page_free_min_min;
		if (cnt.v_free_min > vm_page_free_min_max)
			cnt.v_free_min = vm_page_free_min_max;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = VM_PAGE_FREE_TARGET;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep(&vm_pages_needed, &vm_pages_needed_lock, FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
			(cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;

		/*
		 * Only make a scan if we are likely to do something.
		 * Otherwise we might have been awakened by a pager
		 * to clean up async pageouts.
		 */
		if (cnt.v_free_count < cnt.v_free_target ||
		    cnt.v_inactive_count < cnt.v_inactive_target)
			vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		thread_wakeup(&cnt.v_free_count);
	}
}