xref: /csrg-svn/sys/vm/vm_page.c (revision 50852)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_page.c	7.6 (Berkeley) 08/15/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include "param.h"

#include "vm.h"
#include "vm_map.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from vm_stat.page_size.
 */
void vm_set_page_size()
{

	if (vm_stat.page_size == 0)
		vm_stat.page_size = DEFAULT_PAGE_SIZE;
	page_mask = vm_stat.page_size - 1;
	if ((page_mask & vm_stat.page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == vm_stat.page_size)
			break;
}
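
/*
 *	Editorial worked example (values assumed, not part of the original
 *	file): with the common vm_stat.page_size of 4096 bytes, the loop
 *	above stops at page_shift == 12 (since 1 << 12 == 4096), and
 *	page_mask == 0x0fff, so that
 *
 *		addr >> page_shift	is the page frame number, and
 *		addr & page_mask	is the byte offset within the page.
 *
 *	A size such as 5000 would fail the (page_mask & page_size) test
 *	above and panic, since 5000 is not a power of two.
 */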


/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t	end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than or equal to the number of physical pages in
	 *	the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	blkclr((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Round (or truncate) the addresses to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map, which must be initialized before malloc() will
	 *	work (obviously).  This could also include pager maps, which
	 *	would be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	blkclr((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_stat.free_count = npages =
		(end - start)/(PAGE_SIZE + sizeof(struct vm_page));
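
	/*
	 *	Editorial note (illustrative figures, not from the original
	 *	source): each managed page costs PAGE_SIZE bytes of usable
	 *	memory plus one struct vm_page of bookkeeping, both carved
	 *	from the same [start, end) region.  With 4096-byte pages,
	 *	an 8 Mb region, and (say) a 64-byte struct vm_page,
	 *
	 *		npages = 0x800000 / (4096 + 64) = 2016,
	 *
	 *	a bit less than the 2048 raw frames the region could hold.
	 */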

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

#ifdef i386
	/* XXX - waiting for pmap_bootstrap_malloc() (or somebody like him) */
	if (first_phys_addr > 0xa0000)
		panic("vm_page_startup: fell into the hole");
#endif
	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	blkclr((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = NULL;
		m->phys_addr = pa;
#ifdef i386
		if (pmap_isvalidphys(m->phys_addr)) {
			queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		} else {
			/* perhaps iomem needs its own type, or dev pager? */
			m->fictitious = 1;
			m->busy = TRUE;
			vm_stat.free_count--;
		}
#else /* i386 */
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
#endif /* i386 */
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
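
/*
 *	Editorial worked example (all values assumed): with 4096-byte pages
 *	and vm_page_bucket_count == 1024 (so vm_page_hash_mask == 0x3ff),
 *	an object pointer of 0x80123000 at offset 0x5000 hashes to
 *
 *		(0x80123000 + atop(0x5000)) & 0x3ff
 *		    == (0x80123000 + 5) & 0x3ff  ==  5
 *
 *	Folding in atop(offset) rather than the raw byte offset keeps
 *	pages at consecutive offsets in the same object from all landing
 *	in one bucket.
 */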

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

void		vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#ifdef DEBUG
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
		(mem)->pagerowned = FALSE; \
		(mem)->ptpage = FALSE; \
	}
#else
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}
#endif

	vm_page_init(mem, object, offset);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_stat.free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((vm_stat.free_count < vm_stat.free_min) ||
			((vm_stat.free_count < vm_stat.free_target) &&
			(vm_stat.inactive_count < vm_stat.inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		vm_stat.active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		vm_stat.inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_stat.free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			vm_stat.active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			vm_stat.inactive_count--;
			mem->inactive = FALSE;
		}
		vm_stat.wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_stat.active_count++;
		mem->active = TRUE;
		vm_stat.wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		vm_stat.active_count--;
		vm_stat.inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		vm_stat.inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		vm_stat.active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
746