xref: /csrg-svn/sys/vm/vm_pageout.c (revision 49293)
145748Smckusick /*
245748Smckusick  * Copyright (c) 1991 Regents of the University of California.
345748Smckusick  * All rights reserved.
445748Smckusick  *
545748Smckusick  * This code is derived from software contributed to Berkeley by
645748Smckusick  * The Mach Operating System project at Carnegie-Mellon University.
745748Smckusick  *
848493Smckusick  * %sccs.include.redist.c%
945748Smckusick  *
10*49293Shibler  *	@(#)vm_pageout.c	7.4 (Berkeley) 05/07/91
1148493Smckusick  *
1248493Smckusick  *
1348493Smckusick  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
1448493Smckusick  * All rights reserved.
1548493Smckusick  *
1648493Smckusick  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
1748493Smckusick  *
1848493Smckusick  * Permission to use, copy, modify and distribute this software and
1948493Smckusick  * its documentation is hereby granted, provided that both the copyright
2048493Smckusick  * notice and this permission notice appear in all copies of the
2148493Smckusick  * software, derivative works or modified versions, and any portions
2248493Smckusick  * thereof, and that both notices appear in supporting documentation.
2348493Smckusick  *
2448493Smckusick  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
2548493Smckusick  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
2648493Smckusick  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
2748493Smckusick  *
2848493Smckusick  * Carnegie Mellon requests users of this software to return to
2948493Smckusick  *
3048493Smckusick  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
3148493Smckusick  *  School of Computer Science
3248493Smckusick  *  Carnegie Mellon University
3348493Smckusick  *  Pittsburgh PA 15213-3890
3448493Smckusick  *
3548493Smckusick  * any improvements or extensions that they make and grant Carnegie the
3648493Smckusick  * rights to redistribute these changes.
3745748Smckusick  */
3845748Smckusick 
3945748Smckusick /*
4045748Smckusick  *	The proverbial page-out daemon.
4145748Smckusick  */
4245748Smckusick 
4348386Skarels #include "param.h"
4445748Smckusick 
4548386Skarels #include "vm.h"
4648386Skarels #include "vm_page.h"
4748386Skarels #include "vm_pageout.h"
4848386Skarels 
4945748Smckusick int	vm_pages_needed;		/* Event on which pageout daemon sleeps */
5045748Smckusick int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */
5145748Smckusick 
5245748Smckusick int	vm_page_free_min_sanity = 40;	/* Upper bound on the auto-sized vm_page_free_min (see vm_pageout()) */
5345748Smckusick 
5445748Smckusick /*
5545748Smckusick  *	vm_pageout_scan does the dirty work for the pageout daemon.
5645748Smckusick  */
5745748Smckusick vm_pageout_scan()
5845748Smckusick {
5945748Smckusick 	register vm_page_t	m;
6045748Smckusick 	register int		page_shortage;
6145748Smckusick 	register int		s;
6245748Smckusick 	register int		pages_freed;
6345748Smckusick 	int			free;
6445748Smckusick 
6545748Smckusick 	/*
6645748Smckusick 	 *	Only continue when we want more pages to be "free"
6745748Smckusick 	 */
6845748Smckusick 
	/*
	 * Snapshot the free-page count at splimp with the free-queue lock
	 * held.  It can change the moment we drop the lock, so the value
	 * is only a hint used to decide how aggressive to be.
	 */
6945748Smckusick 	s = splimp();
7045748Smckusick 	simple_lock(&vm_page_queue_free_lock);
7145748Smckusick 	free = vm_page_free_count;
7245748Smckusick 	simple_unlock(&vm_page_queue_free_lock);
7345748Smckusick 	splx(s);
7445748Smckusick 
7545748Smckusick 	if (free < vm_page_free_target) {
		/*
		 * Memory is below target; try swapping out whole threads
		 * before laundering individual pages.
		 */
7645748Smckusick 		swapout_threads();
7745748Smckusick 
7845748Smckusick 		/*
7945748Smckusick 		 *	Be sure the pmap system is updated so
8045748Smckusick 		 *	we can scan the inactive queue.
8145748Smckusick 		 */
8245748Smckusick 
8345748Smckusick 		pmap_update();
8445748Smckusick 	}
8545748Smckusick 
8645748Smckusick 	/*
8745748Smckusick 	 *	Acquire the resident page system lock,
8845748Smckusick 	 *	as we may be changing what's resident quite a bit.
8945748Smckusick 	 */
9045748Smckusick 	vm_page_lock_queues();
9145748Smckusick 
9245748Smckusick 	/*
9345748Smckusick 	 *	Start scanning the inactive queue for pages we can free.
9445748Smckusick 	 *	We keep scanning until we have enough free pages or
9545748Smckusick 	 *	we have scanned through the entire queue.  If we
9645748Smckusick 	 *	encounter dirty pages, we start cleaning them.
9745748Smckusick 	 */
9845748Smckusick 
9945748Smckusick 	pages_freed = 0;
10045748Smckusick 	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
10145748Smckusick 	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
10245748Smckusick 		vm_page_t	next;
10345748Smckusick 
		/*
		 * Re-sample the free count on every iteration; stop the
		 * scan as soon as the free target has been reached.
		 */
10445748Smckusick 		s = splimp();
10545748Smckusick 		simple_lock(&vm_page_queue_free_lock);
10645748Smckusick 		free = vm_page_free_count;
10745748Smckusick 		simple_unlock(&vm_page_queue_free_lock);
10845748Smckusick 		splx(s);
10945748Smckusick 
11045748Smckusick 		if (free >= vm_page_free_target)
11145748Smckusick 			break;
11245748Smckusick 
11345748Smckusick 		if (m->clean) {
11445748Smckusick 			next = (vm_page_t) queue_next(&m->pageq);
			/*
			 * A clean page that has been referenced since it
			 * was deactivated is still in use: put it back on
			 * the active queue instead of freeing it.
			 */
11545748Smckusick 			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
11645748Smckusick 				vm_page_activate(m);
11745748Smckusick 				vm_stat.reactivations++;
11845748Smckusick 			}
11945748Smckusick 			else {
12045748Smckusick 				register vm_object_t	object;
12145748Smckusick 				object = m->object;
12245748Smckusick 				if (!vm_object_lock_try(object)) {
12345748Smckusick 					/*
12445748Smckusick 					 *	Can't lock object -
12545748Smckusick 					 *	skip page.
12645748Smckusick 					 */
12745748Smckusick 					m = next;
12845748Smckusick 					continue;
12945748Smckusick 				}
				/*
				 * Remove all mappings first so nothing can
				 * reference the page once it is freed.
				 */
130*49293Shibler 				pmap_page_protect(VM_PAGE_TO_PHYS(m),
131*49293Shibler 						  VM_PROT_NONE);
13245748Smckusick 				vm_page_free(m);	/* will dequeue */
13345748Smckusick 				pages_freed++;
13445748Smckusick 				vm_object_unlock(object);
13545748Smckusick 			}
13645748Smckusick 			m = next;
13745748Smckusick 		}
13845748Smckusick 		else {
13945748Smckusick 			/*
14045748Smckusick 			 *	If a page is dirty, then it is either
14145748Smckusick 			 *	being washed (but not yet cleaned)
14245748Smckusick 			 *	or it is still in the laundry.  If it is
14345748Smckusick 			 *	still in the laundry, then we start the
14445748Smckusick 			 *	cleaning operation.
14545748Smckusick 			 */
14645748Smckusick 
14745748Smckusick 			if (m->laundry) {
14845748Smckusick 				/*
14945748Smckusick 				 *	Clean the page and remove it from the
15045748Smckusick 				 *	laundry.
15145748Smckusick 				 *
15245748Smckusick 				 *	We set the busy bit to cause
15345748Smckusick 				 *	potential page faults on this page to
15445748Smckusick 				 *	block.
15545748Smckusick 				 *
15645748Smckusick 				 *	And we set pageout-in-progress to keep
15745748Smckusick 				 *	the object from disappearing during
15845748Smckusick 				 *	pageout.  This guarantees that the
15945748Smckusick 				 *	page won't move from the inactive
16045748Smckusick 				 *	queue.  (However, any other page on
16145748Smckusick 				 *	the inactive queue may move!)
16245748Smckusick 				 */
16345748Smckusick 
16445748Smckusick 				register vm_object_t	object;
16545748Smckusick 				register vm_pager_t	pager;
16645748Smckusick 				int			pageout_status;
16745748Smckusick 
16845748Smckusick 				object = m->object;
16945748Smckusick 				if (!vm_object_lock_try(object)) {
17045748Smckusick 					/*
17145748Smckusick 					 *	Skip page if we can't lock
17245748Smckusick 					 *	its object
17345748Smckusick 					 */
17445748Smckusick 					m = (vm_page_t) queue_next(&m->pageq);
17545748Smckusick 					continue;
17645748Smckusick 				}
17745748Smckusick 
				/*
				 * Unmap the page everywhere so it cannot be
				 * dirtied again while the pageout is under way.
				 */
178*49293Shibler 				pmap_page_protect(VM_PAGE_TO_PHYS(m),
179*49293Shibler 						  VM_PROT_NONE);
18045748Smckusick 				m->busy = TRUE;
18145748Smckusick 				vm_stat.pageouts++;
18245748Smckusick 
18345748Smckusick 				/*
18445748Smckusick 				 *	Try to collapse the object before
18545748Smckusick 				 *	making a pager for it.  We must
18645748Smckusick 				 *	unlock the page queues first.
18745748Smckusick 				 */
18845748Smckusick 				vm_page_unlock_queues();
18945748Smckusick 
19045748Smckusick 				vm_object_collapse(object);
19145748Smckusick 
19245748Smckusick 				object->paging_in_progress++;
19345748Smckusick 				vm_object_unlock(object);
19445748Smckusick 
19545748Smckusick 				/*
19645748Smckusick 				 *	Do a wakeup here in case the following
19745748Smckusick 				 *	operations block.
19845748Smckusick 				 */
19945748Smckusick 				thread_wakeup((int) &vm_page_free_count);
20045748Smckusick 
20145748Smckusick 				/*
20245748Smckusick 				 *	If there is no pager for the page,
20345748Smckusick 				 *	use the default pager.  If there's
20445748Smckusick 				 *	no place to put the page at the
20545748Smckusick 				 *	moment, leave it in the laundry and
20645748Smckusick 				 *	hope that there will be paging space
20745748Smckusick 				 *	later.
20845748Smckusick 				 */
20945748Smckusick 
21048386Skarels 				if ((pager = object->pager) == NULL) {
21145748Smckusick 					pager = vm_pager_allocate(PG_DFLT,
21245748Smckusick 								  (caddr_t)0,
21345748Smckusick 								  object->size,
21445748Smckusick 								  VM_PROT_ALL);
21548386Skarels 					if (pager != NULL) {
21645748Smckusick 						vm_object_setpager(object,
21745748Smckusick 							pager, 0, FALSE);
21845748Smckusick 					}
21945748Smckusick 				}
22045748Smckusick 				pageout_status = pager ?
22145748Smckusick 					vm_pager_put(pager, m, FALSE) :
22245748Smckusick 					VM_PAGER_FAIL;
22345748Smckusick 				vm_object_lock(object);
22445748Smckusick 				vm_page_lock_queues();
				/*
				 * Re-fetch the successor now that the queues
				 * are locked again: other inactive pages may
				 * have moved while we were paging (see the
				 * comment above).
				 */
22545748Smckusick 				next = (vm_page_t) queue_next(&m->pageq);
22645748Smckusick 
				/*
				 * NOTE(review): no default case; an
				 * unrecognized pager status leaves the page
				 * busy and on the laundry list.
				 */
22745748Smckusick 				switch (pageout_status) {
22845748Smckusick 				case VM_PAGER_OK:
22945748Smckusick 				case VM_PAGER_PEND:
23045748Smckusick 					m->laundry = FALSE;
23145748Smckusick 					break;
23245748Smckusick 				case VM_PAGER_BAD:
23345748Smckusick 					/*
23445748Smckusick 					 * Page outside of range of object.
23545748Smckusick 					 * Right now we essentially lose the
23645748Smckusick 					 * changes by pretending it worked.
23745748Smckusick 					 * XXX dubious, what should we do?
23845748Smckusick 					 */
23945748Smckusick 					m->laundry = FALSE;
24045748Smckusick 					m->clean = TRUE;
24145748Smckusick 					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
24245748Smckusick 					break;
24345748Smckusick 				case VM_PAGER_FAIL:
24445748Smckusick 					/*
24545748Smckusick 					 * If page couldn't be paged out, then
24645748Smckusick 					 * reactivate the page so it doesn't
24745748Smckusick 					 * clog the inactive list.  (We will
24845748Smckusick 					 * try paging out it again later).
24945748Smckusick 					 */
25045748Smckusick 					vm_page_activate(m);
25145748Smckusick 					break;
25245748Smckusick 				}
25345748Smckusick 
				/*
				 * Clear the reference bit so a later scan
				 * sees only references made after this point.
				 */
25445748Smckusick 				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
25545748Smckusick 
25645748Smckusick 				/*
257*49293Shibler 				 * If the operation is still going, leave
258*49293Shibler 				 * the page busy to block all other accesses.
259*49293Shibler 				 * Also, leave the paging in progress
260*49293Shibler 				 * indicator set so that we don't attempt an
261*49293Shibler 				 * object collapse.
26245748Smckusick 				 */
263*49293Shibler 				if (pageout_status != VM_PAGER_PEND) {
264*49293Shibler 					m->busy = FALSE;
265*49293Shibler 					PAGE_WAKEUP(m);
26645748Smckusick 					object->paging_in_progress--;
267*49293Shibler 				}
26845748Smckusick 				thread_wakeup((int) object);
26945748Smckusick 				vm_object_unlock(object);
27045748Smckusick 				m = next;
27145748Smckusick 			}
27245748Smckusick 			else
27345748Smckusick 				m = (vm_page_t) queue_next(&m->pageq);
27445748Smckusick 		}
27545748Smckusick 	}
27645748Smckusick 
27745748Smckusick 	/*
27845748Smckusick 	 *	Compute the page shortage.  If we are still very low on memory
27945748Smckusick 	 *	be sure that we will move a minimal amount of pages from active
28045748Smckusick 	 *	to inactive.
28145748Smckusick 	 */
28245748Smckusick 
28345748Smckusick 	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
28445748Smckusick 	page_shortage -= vm_page_free_count;
28545748Smckusick 
	/*
	 * Even with no computed shortage, deactivate at least one page
	 * when this pass freed nothing, so the scan always makes some
	 * forward progress.
	 */
28645748Smckusick 	if ((page_shortage <= 0) && (pages_freed == 0))
28745748Smckusick 		page_shortage = 1;
28845748Smckusick 
28945748Smckusick 	while (page_shortage > 0) {
29045748Smckusick 		/*
29145748Smckusick 		 *	Move some more pages from active to inactive.
29245748Smckusick 		 */
29345748Smckusick 
29445748Smckusick 		if (queue_empty(&vm_page_queue_active)) {
29545748Smckusick 			break;
29645748Smckusick 		}
29745748Smckusick 		m = (vm_page_t) queue_first(&vm_page_queue_active);
29845748Smckusick 		vm_page_deactivate(m);
29945748Smckusick 		page_shortage--;
30045748Smckusick 	}
30145748Smckusick 
30245748Smckusick 	vm_page_unlock_queues();
30345748Smckusick }
30445748Smckusick 
30545748Smckusick /*
30645748Smckusick  *	vm_pageout is the high level pageout daemon.
30745748Smckusick  */
30845748Smckusick 
30945748Smckusick void vm_pageout()
31045748Smckusick {
	/* Drop to base interrupt priority level for the life of the daemon. */
31145748Smckusick 	(void) spl0();
31245748Smckusick 
31345748Smckusick 	/*
31445748Smckusick 	 *	Initialize some paging parameters.
31545748Smckusick 	 */
31645748Smckusick 
	/*
	 * free_min defaults to 5% of the free pages at startup, clamped
	 * to the range [3, vm_page_free_min_sanity].  All parameters
	 * below are only auto-sized if not already configured (nonzero).
	 */
31745748Smckusick 	if (vm_page_free_min == 0) {
31845748Smckusick 		vm_page_free_min = vm_page_free_count / 20;
31945748Smckusick 		if (vm_page_free_min < 3)
32045748Smckusick 			vm_page_free_min = 3;
32145748Smckusick 
32245748Smckusick 		if (vm_page_free_min > vm_page_free_min_sanity)
32345748Smckusick 			vm_page_free_min = vm_page_free_min_sanity;
32445748Smckusick 	}
32545748Smckusick 
	/* reserved: half of free_min, but never fewer than 10 pages. */
32645748Smckusick 	if (vm_page_free_reserved == 0) {
32745748Smckusick 		if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
32845748Smckusick 			vm_page_free_reserved = 10;
32945748Smckusick 	}
	/* pageout_free_min: half of reserved, but never more than 10 pages. */
33045748Smckusick 	if (vm_pageout_free_min == 0) {
33145748Smckusick 		if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
33245748Smckusick 			vm_pageout_free_min = 10;
33345748Smckusick 	}
33445748Smckusick 
	/* free_target: free_min plus one-third headroom. */
33545748Smckusick 	if (vm_page_free_target == 0)
33645748Smckusick 		vm_page_free_target = (vm_page_free_min * 4) / 3;
33745748Smckusick 
33845748Smckusick 	if (vm_page_inactive_target == 0)
33945748Smckusick 		vm_page_inactive_target = vm_page_free_min * 2;
34045748Smckusick 
	/* Enforce the ordering free_min < free_target < inactive_target. */
34145748Smckusick 	if (vm_page_free_target <= vm_page_free_min)
34245748Smckusick 		vm_page_free_target = vm_page_free_min + 1;
34345748Smckusick 
34445748Smckusick 	if (vm_page_inactive_target <= vm_page_free_target)
34545748Smckusick 		vm_page_inactive_target = vm_page_free_target + 1;
34645748Smckusick 
34745748Smckusick 	/*
34845748Smckusick 	 *	The pageout daemon is never done, so loop
34945748Smckusick 	 *	forever.
35045748Smckusick 	 */
35145748Smckusick 
	/*
	 * Main loop: sleep until someone signals vm_pages_needed, run a
	 * reclamation pass, let the pagers catch up (vm_pager_sync), then
	 * wake anyone sleeping on vm_page_free_count.  thread_sleep
	 * releases vm_pages_needed_lock, which is why it is re-taken
	 * before each iteration's wakeup.
	 */
35245748Smckusick 	simple_lock(&vm_pages_needed_lock);
35345748Smckusick 	while (TRUE) {
35445748Smckusick 		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
35545748Smckusick 			     FALSE);
35645748Smckusick 		vm_pageout_scan();
35745748Smckusick 		vm_pager_sync();
35845748Smckusick 		simple_lock(&vm_pages_needed_lock);
35945748Smckusick 		thread_wakeup((int) &vm_page_free_count);
36045748Smckusick 	}
36145748Smckusick }
362