/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 02/14/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#ifndef VM_PAGE_FREE_MIN
#define VM_PAGE_FREE_MIN	(cnt.v_free_count / 20)
#endif

#ifndef VM_PAGE_FREE_TARGET
#define VM_PAGE_FREE_TARGET	((cnt.v_free_min * 4) / 3)
#endif

int	vm_page_free_min_min = 16 * 1024;
int	vm_page_free_min_max = 256 * 1024;

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

#ifdef CLUSTERED_PAGEOUT
#define MAXPOCLUSTER		(MAXPHYS/NBPG)	/* XXX */
int doclustered_pageout = 1;
#endif

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
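/*
 * Referenced pages found on the inactive queue are moved back to the
 * active queue, clean inactive pages are freed, and dirty pages still
 * marked PG_LAUNDRY are handed to the pager.  Once the scan is done,
 * active pages are deactivated as needed to bring the inactive queue
 * up to its target length.
 */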
void
vm_pageout_scan()
{
	register vm_page_t	m, next;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;
	vm_object_t		object;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	cnt.v_rev++;

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		if (free >= cnt.v_free_target)
			break;

		cnt.v_scan++;
		next = m->pageq.tqe_next;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			cnt.v_reactivated++;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 */
		if (m->flags & PG_CLEAN) {
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				cnt.v_dfree++;
				vm_object_unlock(object);
			}
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0)
			continue;

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 */
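		/*
		 * Note that we only try-lock the object; if the lock
		 * cannot be obtained without blocking, the page is
		 * simply skipped on this pass.
		 */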
		object = m->object;
		if (!vm_object_lock_try(object))
			continue;
		cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
		if (object->pager &&
		    vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
			vm_pageout_cluster(m, object);
		else
#endif
		vm_pageout_page(m, object);
		thread_wakeup((int) object);
		vm_object_unlock(object);
		/*
		 * Former next page may no longer even be on the inactive
		 * queue (due to potential blocking in the pager with the
		 * queues unlocked).  If it isn't, we just start over.
		 */
		if (next && (next->flags & PG_INACTIVE) == 0)
			next = vm_page_queue_inactive.tqh_first;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if ((m = vm_page_queue_active.tqh_first) == NULL)
			break;
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 * Called with object and page queues locked.
 * If the pager reports a hard error (FAIL or ERROR), the page is put
 * back on the active queue; otherwise it is left on the inactive queue.
 */
void
vm_pageout_page(m, object)
	vm_page_t	m;
	vm_object_t	object;
{
	vm_pager_t	pager;
	int		pageout_status;

	/*
	 * We set the busy bit to cause potential page faults on
	 * this page to block.
	 *
	 * We also set pageout-in-progress to keep the object from
	 * disappearing during pageout.  This guarantees that the
	 * page won't move from the inactive queue.  (However, any
	 * other page on the inactive queue may move!)
	 */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
	m->flags |= PG_BUSY;

	/*
	 * Try to collapse the object before making a pager for it.
	 * We must unlock the page queues first.
	 */
	vm_page_unlock_queues();
	if (object->pager == NULL)
		vm_object_collapse(object);

	object->paging_in_progress++;
	vm_object_unlock(object);

	/*
	 * Do a wakeup here in case the following operations block.
	 */
	thread_wakeup((int) &cnt.v_free_count);

	/*
	 * If there is no pager for the page, use the default pager.
	 * If there is no place to put the page at the moment,
	 * leave it in the laundry and hope that there will be
	 * paging space later.
	 */
	if ((pager = object->pager) == NULL) {
		pager = vm_pager_allocate(PG_DFLT, (caddr_t)0, object->size,
					  VM_PROT_ALL, (vm_offset_t)0);
		if (pager != NULL)
			vm_object_setpager(object, pager, 0, FALSE);
	}
	pageout_status = pager ? vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
	vm_object_lock(object);
	vm_page_lock_queues();

	switch (pageout_status) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:
		cnt.v_pgpgout++;
		m->flags &= ~PG_LAUNDRY;
		break;
	case VM_PAGER_BAD:
		/*
		 * Page outside of range of object.  Right now we
		 * essentially lose the changes by pretending it
		 * worked.
		 *
		 * XXX dubious, what should we do?
		 */
		m->flags &= ~PG_LAUNDRY;
		m->flags |= PG_CLEAN;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
		break;
	case VM_PAGER_AGAIN:
	{
		extern int lbolt;

		/*
		 * AGAIN on a write is interpreted to mean a resource
		 * shortage, so we pause for a while and try again.
		 * XXX could get stuck here.
		 */
		(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
		break;
	}
	case VM_PAGER_FAIL:
	case VM_PAGER_ERROR:
		/*
		 * If the page couldn't be paged out, then reactivate
		 * it so that it doesn't clog the inactive list.
		 * (We will try paging it out again later.)
		 */
		vm_page_activate(m);
		cnt.v_reactivated++;
		break;
	}

	pmap_clear_reference(VM_PAGE_TO_PHYS(m));

	/*
	 * If the operation is still going, leave the page busy
	 * to block all other accesses.  Also, leave the paging
	 * in progress indicator set so that we don't attempt an
	 * object collapse.
	 */
	if (pageout_status != VM_PAGER_PEND) {
		m->flags &= ~PG_BUSY;
		PAGE_WAKEUP(m);
		object->paging_in_progress--;
	}
}

#ifdef CLUSTERED_PAGEOUT
#define PAGEOUTABLE(p) \
	((((p)->flags & (PG_INACTIVE|PG_CLEAN|PG_LAUNDRY)) == \
	  (PG_INACTIVE|PG_LAUNDRY)) && !pmap_is_referenced(VM_PAGE_TO_PHYS(p)))

/*
 * Attempt to pageout as many contiguous (to ``m'') dirty pages as possible
 * from ``object''.  Using information returned from the pager, we assemble
 * a sorted list of contiguous dirty pages and feed them to the pager in one
 * chunk.  Called with paging queues and object locked.  Also, object must
 * already have a pager.
 */
void
vm_pageout_cluster(m, object)
	vm_page_t	m;
	vm_object_t	object;
{
	vm_offset_t offset, loff, hoff;
	vm_page_t plist[MAXPOCLUSTER], *plistp, p;
	int postatus, ix, count;

	/*
	 * Determine the range of pages that can be part of a cluster
	 * for this object/offset.  If it is only our single page, just
	 * do it normally.
	 */
	vm_pager_cluster(object->pager, m->offset, &loff, &hoff);
	if (hoff - loff == PAGE_SIZE) {
		vm_pageout_page(m, object);
		return;
	}

	plistp = plist;

	/*
	 * Target page is always part of the cluster.
	 */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
	m->flags |= PG_BUSY;
	plistp[atop(m->offset - loff)] = m;
	count = 1;

	/*
	 * Back up from the given page until we find one not fulfilling
	 * the pageout criteria or we hit the lower bound for the
	 * cluster.  For each page determined to be part of the
	 * cluster, unmap it and busy it out so it won't change.
	 */
	ix = atop(m->offset - loff);
	offset = m->offset;
	while (offset > loff && count < MAXPOCLUSTER-1) {
		p = vm_page_lookup(object, offset - PAGE_SIZE);
		if (p == NULL || !PAGEOUTABLE(p))
			break;
		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
		p->flags |= PG_BUSY;
		plistp[--ix] = p;
		offset -= PAGE_SIZE;
		count++;
	}
	plistp += atop(offset - loff);
	loff = offset;

	/*
	 * Now do the same moving forward from the target.
	 */
	ix = atop(m->offset - loff) + 1;
	offset = m->offset + PAGE_SIZE;
	while (offset < hoff && count < MAXPOCLUSTER) {
		p = vm_page_lookup(object, offset);
		if (p == NULL || !PAGEOUTABLE(p))
			break;
		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
		p->flags |= PG_BUSY;
		plistp[ix++] = p;
		offset += PAGE_SIZE;
		count++;
	}
	hoff = offset;

	/*
	 * Pageout the page.
	 * Unlock everything and do a wakeup prior to the pager call
	 * in case it blocks.
	 */
	vm_page_unlock_queues();
	object->paging_in_progress++;
	vm_object_unlock(object);
again:
	thread_wakeup((int) &cnt.v_free_count);
	postatus = vm_pager_put_pages(object->pager, plistp, count, FALSE);
	/*
	 * XXX rethink this
	 */
	if (postatus == VM_PAGER_AGAIN) {
		extern int lbolt;

		(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
		goto again;
	} else if (postatus == VM_PAGER_BAD)
		panic("vm_pageout_cluster: VM_PAGER_BAD");
	vm_object_lock(object);
	vm_page_lock_queues();

	/*
	 * Loop through the affected pages, reflecting the outcome of
	 * the operation.
	 */
	for (ix = 0; ix < count; ix++) {
		p = *plistp++;
		switch (postatus) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			cnt.v_pgpgout++;
			p->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * Pageout failed, reactivate the target page so it
			 * doesn't clog the inactive list.  Other pages are
			 * left as they are.
			 */
			if (p == m) {
				vm_page_activate(p);
				cnt.v_reactivated++;
			}
			break;
		}
		pmap_clear_reference(VM_PAGE_TO_PHYS(p));
		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.
		 */
		if (postatus != VM_PAGER_PEND) {
			p->flags &= ~PG_BUSY;
			PAGE_WAKEUP(p);
		}
	}
	/*
	 * If the operation is still going, leave the paging in progress
	 * indicator set so that we don't attempt an object collapse.
	 */
	if (postatus != VM_PAGER_PEND)
		object->paging_in_progress--;
}
#endif

/*
 * vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */
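	/*
	 * Defaults: v_free_min is 1/20 of the pages free at startup
	 * (VM_PAGE_FREE_MIN), clamped to between 16K and 256K bytes
	 * worth of pages; v_free_target is 4/3 of v_free_min
	 * (VM_PAGE_FREE_TARGET).
	 */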
	if (cnt.v_free_min == 0) {
		cnt.v_free_min = VM_PAGE_FREE_MIN;
		vm_page_free_min_min /= cnt.v_page_size;
		vm_page_free_min_max /= cnt.v_page_size;
		if (cnt.v_free_min < vm_page_free_min_min)
			cnt.v_free_min = vm_page_free_min_min;
		if (cnt.v_free_min > vm_page_free_min_max)
			cnt.v_free_min = vm_page_free_min_max;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = VM_PAGE_FREE_TARGET;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
			(cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;

		/*
		 * Only make a scan if we are likely to do something.
		 * Otherwise we might have been awakened by a pager
		 * to clean up async pageouts.
		 */
		if (cnt.v_free_count < cnt.v_free_target ||
		    cnt.v_inactive_count < cnt.v_inactive_target)
			vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		thread_wakeup((int) &cnt.v_free_count);
	}
}