/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_page.c	7.1 (Berkeley) 12/05/90
 */

/*
 *	Resident memory management module.
 */

#include "types.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_page.h"
#include "../vm/vm_prot.h"
#include "../vm/vm_statistics.h"
#include "../vm/vm_pageout.h"
#include "../vm/pmap.h"

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

queue_head_t	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;

int	vm_page_free_count;
int	vm_page_active_count;
int	vm_page_inactive_count;
int	vm_page_wire_count;
int	vm_page_laundry_count;

int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void vm_set_page_size()
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
}
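
/*
 *	For example, the defaults above satisfy this: page_size 4096
 *	gives page_mask 4095 (0xfff) and page_shift 12, since
 *	4096 == 1 << 12.  A size that is not a power of two (say 3000)
 *	would leave bits of page_size set in page_mask, and the check
 *	above panics.
 */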


/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t	end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than or equal to the number of physical pages in
	 *	the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
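
	/*
	 *	E.g., managing 16 megabytes with 4096-byte pages gives
	 *	atop(end - start) = 4096 pages, so vm_page_bucket_count
	 *	becomes 4096 and vm_page_hash_mask 0xfff.
	 */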

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	blkclr((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Round (or truncate) the addresses to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Pager maps, which would be allocated
	 *	before kmeminit, could also be included.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	blkclr((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_page_free_count = npages =
		(end - start)/(PAGE_SIZE + sizeof(struct vm_page));
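
	/*
	 *	Each usable page thus costs PAGE_SIZE bytes plus one
	 *	struct vm_page.  If, say, sizeof(struct vm_page) were
	 *	64 bytes on a given machine, an 8-megabyte range would
	 *	yield 8388608 / (4096 + 64) = 2016 pages.
	 */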

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + PAGE_MASK;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	blkclr((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->copy_on_write = FALSE;
		m->wanted = FALSE;
		m->inactive = FALSE;
		m->active = FALSE;
		m->busy = FALSE;
		m->object = VM_OBJECT_NULL;
		m->phys_addr = pa;
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
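
/*
 *	The bucket index is just the low-order bits of the object
 *	pointer plus the page number of the offset: with
 *	vm_page_hash_mask = 0xfff, an object at (say) 0x80001000 and
 *	an offset of 0x3000 (page 3 with 4K pages) hash to
 *	(0x80001000 + 3) & 0xfff = 0x003.
 */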

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!mem->tabled)
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(VM_PAGE_NULL);
}
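
#ifdef notdef
/*
 *	A sketch, not part of this module: a typical consumer looks
 *	the page up and allocates a fresh one on a miss.  The object
 *	is assumed to be locked throughout, as both routines require.
 */
static vm_page_t example_find_or_alloc(object, offset)
	register vm_object_t	object;
	vm_offset_t		offset;
{
	register vm_page_t	m;

	m = vm_page_lookup(object, offset);
	if (m == VM_PAGE_NULL)
		m = vm_page_alloc(object, offset);	/* may still fail */
	return(m);
}
#endif /* notdef */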

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

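/*
 *	vm_page_init:
 *
 *	Initializes the given page structure and inserts it into the
 *	given object at the given offset.  Note that the body is also
 *	defined as a macro, so callers later in this file (such as
 *	vm_page_alloc) get the expansion in line rather than a call.
 */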
void		vm_page_init(mem, object, offset)
	vm_page_t	mem;
	vm_object_t	object;
	vm_offset_t	offset;
{
#define	vm_page_init(mem, object, offset)  {\
		(mem)->busy = TRUE; \
		(mem)->tabled = FALSE; \
		vm_page_insert((mem), (object), (offset)); \
		(mem)->absent = FALSE; \
		(mem)->fictitious = FALSE; \
		(mem)->page_lock = VM_PROT_NONE; \
		(mem)->unlock_request = VM_PROT_NONE; \
		(mem)->laundry = FALSE; \
		(mem)->active = FALSE; \
		(mem)->inactive = FALSE; \
		(mem)->wire_count = 0; \
		(mem)->clean = TRUE; \
		(mem)->copy_on_write = FALSE; \
		(mem)->fake = TRUE; \
	}

	vm_page_init(mem, object, offset);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(VM_PAGE_NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_page_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	vm_page_init(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if ((vm_page_free_count < vm_page_free_min) ||
			((vm_page_free_count < vm_page_free_target) &&
			(vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}
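
/*
 *	Concretely: with vm_page_free_min = 64 and vm_page_free_target
 *	= 256 (hypothetical figures; both default to 0 above until set
 *	elsewhere), the daemon is woken when fewer than 64 pages are
 *	free, or when 64-255 are free but the inactive queue is below
 *	its target.
 */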

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->active) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->active = FALSE;
		vm_page_active_count--;
	}

	if (mem->inactive) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->inactive = FALSE;
		vm_page_inactive_count--;
	}

	if (!mem->fictitious) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_page_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->active) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
						pageq);
			vm_page_active_count--;
			mem->active = FALSE;
		}
		if (mem->inactive) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
						pageq);
			vm_page_inactive_count--;
			mem->inactive = FALSE;
		}
		vm_page_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		vm_page_wire_count--;
	}
}
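
#ifdef notdef
/*
 *	A sketch, not part of this module: bracketing device I/O with
 *	a wire/unwire pair.  The queue lock is taken around each call,
 *	as required above; the I/O itself is only suggested.
 */
static void example_wire_for_io(m)
	register vm_page_t	m;
{
	vm_page_lock_queues();
	vm_page_wire(m);
	vm_page_unlock_queues();

	/* ... start and await I/O on VM_PAGE_TO_PHYS(m) ... */

	vm_page_lock_queues();
	vm_page_unwire(m);
	vm_page_unlock_queues();
}
#endif /* notdef */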

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */

	if (m->active) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->active = FALSE;
		m->inactive = TRUE;
		vm_page_active_count--;
		vm_page_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->clean = FALSE;
		m->laundry = !m->clean;
	}
}
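
/*
 *	Deactivation is where the dirty state is latched: a page the
 *	pmap layer reports as modified is marked not clean, and the
 *	laundry bit then tells the pageout daemon that the page must
 *	be written to backing store before it can be reused.
 */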

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->inactive) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
						pageq);
		vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		vm_page_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */

void vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
699