/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	7.3 (Berkeley) 04/21/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
3745748Smckusick */ 3845748Smckusick 3945748Smckusick /* 4045748Smckusick * The proverbial page-out daemon. 4145748Smckusick */ 4245748Smckusick 4348386Skarels #include "param.h" 4445748Smckusick 4548386Skarels #include "vm.h" 4648386Skarels #include "vm_page.h" 4748386Skarels #include "vm_pageout.h" 4848386Skarels 4945748Smckusick int vm_pages_needed; /* Event on which pageout daemon sleeps */ 5045748Smckusick int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */ 5145748Smckusick 5245748Smckusick int vm_page_free_min_sanity = 40; 5345748Smckusick 5445748Smckusick /* 5545748Smckusick * vm_pageout_scan does the dirty work for the pageout daemon. 5645748Smckusick */ 5745748Smckusick vm_pageout_scan() 5845748Smckusick { 5945748Smckusick register vm_page_t m; 6045748Smckusick register int page_shortage; 6145748Smckusick register int s; 6245748Smckusick register int pages_freed; 6345748Smckusick int free; 6445748Smckusick 6545748Smckusick /* 6645748Smckusick * Only continue when we want more pages to be "free" 6745748Smckusick */ 6845748Smckusick 6945748Smckusick s = splimp(); 7045748Smckusick simple_lock(&vm_page_queue_free_lock); 7145748Smckusick free = vm_page_free_count; 7245748Smckusick simple_unlock(&vm_page_queue_free_lock); 7345748Smckusick splx(s); 7445748Smckusick 7545748Smckusick if (free < vm_page_free_target) { 7645748Smckusick swapout_threads(); 7745748Smckusick 7845748Smckusick /* 7945748Smckusick * Be sure the pmap system is updated so 8045748Smckusick * we can scan the inactive queue. 8145748Smckusick */ 8245748Smckusick 8345748Smckusick pmap_update(); 8445748Smckusick } 8545748Smckusick 8645748Smckusick /* 8745748Smckusick * Acquire the resident page system lock, 8845748Smckusick * as we may be changing what's resident quite a bit. 8945748Smckusick */ 9045748Smckusick vm_page_lock_queues(); 9145748Smckusick 9245748Smckusick /* 9345748Smckusick * Start scanning the inactive queue for pages we can free. 
9445748Smckusick * We keep scanning until we have enough free pages or 9545748Smckusick * we have scanned through the entire queue. If we 9645748Smckusick * encounter dirty pages, we start cleaning them. 9745748Smckusick */ 9845748Smckusick 9945748Smckusick pages_freed = 0; 10045748Smckusick m = (vm_page_t) queue_first(&vm_page_queue_inactive); 10145748Smckusick while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) { 10245748Smckusick vm_page_t next; 10345748Smckusick 10445748Smckusick s = splimp(); 10545748Smckusick simple_lock(&vm_page_queue_free_lock); 10645748Smckusick free = vm_page_free_count; 10745748Smckusick simple_unlock(&vm_page_queue_free_lock); 10845748Smckusick splx(s); 10945748Smckusick 11045748Smckusick if (free >= vm_page_free_target) 11145748Smckusick break; 11245748Smckusick 11345748Smckusick if (m->clean) { 11445748Smckusick next = (vm_page_t) queue_next(&m->pageq); 11545748Smckusick if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { 11645748Smckusick vm_page_activate(m); 11745748Smckusick vm_stat.reactivations++; 11845748Smckusick } 11945748Smckusick else { 12045748Smckusick register vm_object_t object; 12145748Smckusick object = m->object; 12245748Smckusick if (!vm_object_lock_try(object)) { 12345748Smckusick /* 12445748Smckusick * Can't lock object - 12545748Smckusick * skip page. 12645748Smckusick */ 12745748Smckusick m = next; 12845748Smckusick continue; 12945748Smckusick } 13045748Smckusick pmap_remove_all(VM_PAGE_TO_PHYS(m)); 13145748Smckusick vm_page_free(m); /* will dequeue */ 13245748Smckusick pages_freed++; 13345748Smckusick vm_object_unlock(object); 13445748Smckusick } 13545748Smckusick m = next; 13645748Smckusick } 13745748Smckusick else { 13845748Smckusick /* 13945748Smckusick * If a page is dirty, then it is either 14045748Smckusick * being washed (but not yet cleaned) 14145748Smckusick * or it is still in the laundry. If it is 14245748Smckusick * still in the laundry, then we start the 14345748Smckusick * cleaning operation. 
14445748Smckusick */ 14545748Smckusick 14645748Smckusick if (m->laundry) { 14745748Smckusick /* 14845748Smckusick * Clean the page and remove it from the 14945748Smckusick * laundry. 15045748Smckusick * 15145748Smckusick * We set the busy bit to cause 15245748Smckusick * potential page faults on this page to 15345748Smckusick * block. 15445748Smckusick * 15545748Smckusick * And we set pageout-in-progress to keep 15645748Smckusick * the object from disappearing during 15745748Smckusick * pageout. This guarantees that the 15845748Smckusick * page won't move from the inactive 15945748Smckusick * queue. (However, any other page on 16045748Smckusick * the inactive queue may move!) 16145748Smckusick */ 16245748Smckusick 16345748Smckusick register vm_object_t object; 16445748Smckusick register vm_pager_t pager; 16545748Smckusick int pageout_status; 16645748Smckusick 16745748Smckusick object = m->object; 16845748Smckusick if (!vm_object_lock_try(object)) { 16945748Smckusick /* 17045748Smckusick * Skip page if we can't lock 17145748Smckusick * its object 17245748Smckusick */ 17345748Smckusick m = (vm_page_t) queue_next(&m->pageq); 17445748Smckusick continue; 17545748Smckusick } 17645748Smckusick 17745748Smckusick pmap_remove_all(VM_PAGE_TO_PHYS(m)); 17845748Smckusick m->busy = TRUE; 17945748Smckusick vm_stat.pageouts++; 18045748Smckusick 18145748Smckusick /* 18245748Smckusick * Try to collapse the object before 18345748Smckusick * making a pager for it. We must 18445748Smckusick * unlock the page queues first. 18545748Smckusick */ 18645748Smckusick vm_page_unlock_queues(); 18745748Smckusick 18845748Smckusick vm_object_collapse(object); 18945748Smckusick 19045748Smckusick object->paging_in_progress++; 19145748Smckusick vm_object_unlock(object); 19245748Smckusick 19345748Smckusick /* 19445748Smckusick * Do a wakeup here in case the following 19545748Smckusick * operations block. 
19645748Smckusick */ 19745748Smckusick thread_wakeup((int) &vm_page_free_count); 19845748Smckusick 19945748Smckusick /* 20045748Smckusick * If there is no pager for the page, 20145748Smckusick * use the default pager. If there's 20245748Smckusick * no place to put the page at the 20345748Smckusick * moment, leave it in the laundry and 20445748Smckusick * hope that there will be paging space 20545748Smckusick * later. 20645748Smckusick */ 20745748Smckusick 20848386Skarels if ((pager = object->pager) == NULL) { 20945748Smckusick pager = vm_pager_allocate(PG_DFLT, 21045748Smckusick (caddr_t)0, 21145748Smckusick object->size, 21245748Smckusick VM_PROT_ALL); 21348386Skarels if (pager != NULL) { 21445748Smckusick vm_object_setpager(object, 21545748Smckusick pager, 0, FALSE); 21645748Smckusick } 21745748Smckusick } 21845748Smckusick pageout_status = pager ? 21945748Smckusick vm_pager_put(pager, m, FALSE) : 22045748Smckusick VM_PAGER_FAIL; 22145748Smckusick vm_object_lock(object); 22245748Smckusick vm_page_lock_queues(); 22345748Smckusick next = (vm_page_t) queue_next(&m->pageq); 22445748Smckusick 22545748Smckusick switch (pageout_status) { 22645748Smckusick case VM_PAGER_OK: 22745748Smckusick case VM_PAGER_PEND: 22845748Smckusick m->laundry = FALSE; 22945748Smckusick break; 23045748Smckusick case VM_PAGER_BAD: 23145748Smckusick /* 23245748Smckusick * Page outside of range of object. 23345748Smckusick * Right now we essentially lose the 23445748Smckusick * changes by pretending it worked. 23545748Smckusick * XXX dubious, what should we do? 23645748Smckusick */ 23745748Smckusick m->laundry = FALSE; 23845748Smckusick m->clean = TRUE; 23945748Smckusick pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 24045748Smckusick break; 24145748Smckusick case VM_PAGER_FAIL: 24245748Smckusick /* 24345748Smckusick * If page couldn't be paged out, then 24445748Smckusick * reactivate the page so it doesn't 24545748Smckusick * clog the inactive list. 
(We will 24645748Smckusick * try paging out it again later). 24745748Smckusick */ 24845748Smckusick vm_page_activate(m); 24945748Smckusick break; 25045748Smckusick } 25145748Smckusick 25245748Smckusick pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 25345748Smckusick m->busy = FALSE; 25445748Smckusick PAGE_WAKEUP(m); 25545748Smckusick 25645748Smckusick /* 25745748Smckusick * If the operation is still going, leave the 25845748Smckusick * paging in progress indicator set so that we 25945748Smckusick * don't attempt an object collapse. 26045748Smckusick */ 26145748Smckusick if (pageout_status != VM_PAGER_PEND) 26245748Smckusick object->paging_in_progress--; 26345748Smckusick thread_wakeup((int) object); 26445748Smckusick vm_object_unlock(object); 26545748Smckusick m = next; 26645748Smckusick } 26745748Smckusick else 26845748Smckusick m = (vm_page_t) queue_next(&m->pageq); 26945748Smckusick } 27045748Smckusick } 27145748Smckusick 27245748Smckusick /* 27345748Smckusick * Compute the page shortage. If we are still very low on memory 27445748Smckusick * be sure that we will move a minimal amount of pages from active 27545748Smckusick * to inactive. 27645748Smckusick */ 27745748Smckusick 27845748Smckusick page_shortage = vm_page_inactive_target - vm_page_inactive_count; 27945748Smckusick page_shortage -= vm_page_free_count; 28045748Smckusick 28145748Smckusick if ((page_shortage <= 0) && (pages_freed == 0)) 28245748Smckusick page_shortage = 1; 28345748Smckusick 28445748Smckusick while (page_shortage > 0) { 28545748Smckusick /* 28645748Smckusick * Move some more pages from active to inactive. 
28745748Smckusick */ 28845748Smckusick 28945748Smckusick if (queue_empty(&vm_page_queue_active)) { 29045748Smckusick break; 29145748Smckusick } 29245748Smckusick m = (vm_page_t) queue_first(&vm_page_queue_active); 29345748Smckusick vm_page_deactivate(m); 29445748Smckusick page_shortage--; 29545748Smckusick } 29645748Smckusick 29745748Smckusick vm_page_unlock_queues(); 29845748Smckusick } 29945748Smckusick 30045748Smckusick /* 30145748Smckusick * vm_pageout is the high level pageout daemon. 30245748Smckusick */ 30345748Smckusick 30445748Smckusick void vm_pageout() 30545748Smckusick { 30645748Smckusick (void) spl0(); 30745748Smckusick 30845748Smckusick /* 30945748Smckusick * Initialize some paging parameters. 31045748Smckusick */ 31145748Smckusick 31245748Smckusick if (vm_page_free_min == 0) { 31345748Smckusick vm_page_free_min = vm_page_free_count / 20; 31445748Smckusick if (vm_page_free_min < 3) 31545748Smckusick vm_page_free_min = 3; 31645748Smckusick 31745748Smckusick if (vm_page_free_min > vm_page_free_min_sanity) 31845748Smckusick vm_page_free_min = vm_page_free_min_sanity; 31945748Smckusick } 32045748Smckusick 32145748Smckusick if (vm_page_free_reserved == 0) { 32245748Smckusick if ((vm_page_free_reserved = vm_page_free_min / 2) < 10) 32345748Smckusick vm_page_free_reserved = 10; 32445748Smckusick } 32545748Smckusick if (vm_pageout_free_min == 0) { 32645748Smckusick if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10) 32745748Smckusick vm_pageout_free_min = 10; 32845748Smckusick } 32945748Smckusick 33045748Smckusick if (vm_page_free_target == 0) 33145748Smckusick vm_page_free_target = (vm_page_free_min * 4) / 3; 33245748Smckusick 33345748Smckusick if (vm_page_inactive_target == 0) 33445748Smckusick vm_page_inactive_target = vm_page_free_min * 2; 33545748Smckusick 33645748Smckusick if (vm_page_free_target <= vm_page_free_min) 33745748Smckusick vm_page_free_target = vm_page_free_min + 1; 33845748Smckusick 33945748Smckusick if (vm_page_inactive_target <= 
vm_page_free_target) 34045748Smckusick vm_page_inactive_target = vm_page_free_target + 1; 34145748Smckusick 34245748Smckusick /* 34345748Smckusick * The pageout daemon is never done, so loop 34445748Smckusick * forever. 34545748Smckusick */ 34645748Smckusick 34745748Smckusick simple_lock(&vm_pages_needed_lock); 34845748Smckusick while (TRUE) { 34945748Smckusick thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock, 35045748Smckusick FALSE); 35145748Smckusick vm_pageout_scan(); 35245748Smckusick vm_pager_sync(); 35345748Smckusick simple_lock(&vm_pages_needed_lock); 35445748Smckusick thread_wakeup((int) &vm_page_free_count); 35545748Smckusick } 35645748Smckusick } 357