/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.39 2008/07/01 02:02:56 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

struct vpgqueues vm_page_queues[PQ_COUNT]; /* Array of tailq lists */

#define ASSERT_IN_CRIT_SECTION()	KKASSERT(crit_test(curthread))

RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
	     vm_pindex_t, pindex);

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
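
/*
 * The power-of-two test above uses the usual bit trick: a power of two
 * has exactly one bit set, so (x - 1) & x clears the lowest set bit and
 * yields zero only for powers of two.  For example, with PAGE_SIZE = 4096:
 *
 *	0x1000 - 1 = 0x0fff;	0x0fff & 0x1000 == 0		(power of two)
 *	0x1800 - 1 = 0x17ff;	0x17ff & 0x1800 == 0x1000	(not)
 */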

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	KKASSERT(m->dirty == 0);

	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;

	vm_page_queues[m->queue].lcnt++;
	return (m);
}
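
/*
 * Page coloring example (illustrative; PQ_L2_SIZE is configuration
 * dependent).  Assuming PQ_L2_SIZE = 256 (PQ_L2_MASK = 0xff) and
 * PAGE_SHIFT = 12, physical address 0x12345000 has page index 0x12345,
 * so its color is 0x12345 & 0xff = 0x45 and it lands on PQ_FREE + 0x45.
 * Pages at consecutive physical addresses thus rotate through the free
 * queues, which is what the color scan in _vm_page_list_find() exploits.
 */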

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and for the object/offset-to-page
 * hash table headers.  Each page cell is initialized and placed on the
 * free list.
 *
 * The range of physical memory addresses available for use is taken
 * from phys_avail[], which has already had memory in use by the kernel
 * mapped out.
 */
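/*
 * Illustrative phys_avail[] layout (hypothetical values): the array
 * holds (start, end) address pairs and is terminated by a pair whose
 * end entry is zero, e.g.
 *
 *	phys_avail[0] = 0x00001000	phys_avail[1] = 0x0009e000
 *	phys_avail[2] = 0x00100000	phys_avail[3] = 0x3ffe0000
 *	phys_avail[4] = 0		phys_avail[5] = 0
 */
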
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;
	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];
	end = trunc_page(end);

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex < info->start_pindex)
		return(-1);
	if (p->pindex > info->end_pindex)
		return(1);
	return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
	if (p1->pindex < p2->pindex)
		return(-1);
	if (p1->pindex > p2->pindex)
		return(1);
	return(0);
}

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
		vm_page_busy(mem);
		vm_page_free_toq(mem);
	}
}

/*
 * Inserts the given mem entry into the object and object list.
 *
 * The pagetables are not updated here; the page will either be faulted
 * in when accessed or, for a kernel page, entered into the kernel's
 * pmap by the caller at some later point.  We are not allowed to block
 * here, so we could not enter the page into a pmap anyway.
 *
 * This routine may not block.
 * This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	ASSERT_IN_CRIT_SECTION();
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object.
	 */
	vm_page_rb_tree_RB_INSERT(&object->rb_memq, m);
	object->generation++;

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if ((m->valid & m->dirty) || (m->flags & PG_WRITEABLE))
		vm_object_set_writeable_dirty(object);
}

/*
 * Removes the given vm_page_t from its object's page tree (rb_memq)
 * and updates the object.
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.  No spl needs to be
 * held on call to this routine.
 *
 * note: FreeBSD side effect was to unbusy the page on return.  We leave
 * it busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	crit_enter();
	if (m->object == NULL) {
		crit_exit();
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	/*
	 * Remove the page from the object and update the object.
	 */
	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
	object->resident_page_count--;
	object->generation++;
	m->object = NULL;

	crit_exit();
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * This routine will operate properly without spl protection, but
 * the returned page could be in flux if it is busy.  Because an
 * interrupt can race a caller's busy check (unbusying and freeing the
 * page we return before the caller is able to check the busy bit),
 * the caller should generally call this routine with a critical
 * section held.
 *
 * Callers may call this routine without spl protection if they know
 * 'for sure' that the page will not be ripped out from under them
 * by an interrupt.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Search the object's page tree for this object/offset pair
	 */
	crit_enter();
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	crit_exit();
	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
	return(m);
}
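
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * hold the critical section across both the lookup and the busy check
 * so an interrupt cannot free or re-busy the page in between.
 *
 *	crit_enter();
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && !(m->flags & PG_BUSY) && m->busy == 0) {
 *		vm_page_busy(m);
 *		... operate on the page ...
 *		vm_page_wakeup(m);
 *	}
 *	crit_exit();
 */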

/*
 * vm_page_rename()
 *
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine will raise itself to splvm(), the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons:  (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *	 swap.  If the page is on the cache queue, we have to deactivate
 *	 it or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	 on the cache queue.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	crit_enter();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	vm_page_wakeup(m);
	crit_exit();
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain busied (PG_BUSY)
 * by the caller.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	if (prefer_zero)
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	else
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	if (m == NULL)
		m = _vm_page_list_find2(basequeue, index);
	return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}
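
/*
 * Probe-order example for the fallback scan above (illustrative;
 * assumes PQ_L2_SIZE = 8, so PQ_L2_MASK = 7).  With index = 5 the
 * loop runs i = 4, 3, 2, 1 and inspects queues
 *
 *	i = 4:	(5+4) & 7 = 1	and	(5-4) & 7 = 1	(same queue)
 *	i = 3:	(5+3) & 7 = 0	and	(5-3) & 7 = 2
 *	i = 2:	(5+2) & 7 = 7	and	(5-2) & 7 = 3
 *	i = 1:	(5+1) & 7 = 6	and	(5-1) & 7 = 4
 *
 * stopping at the first non-empty queue.  Queue 'index' itself was
 * already checked by _vm_page_list_find().
 */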

vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization.  Pages that
 * are found but not usable (busy, held, wired, or unmanaged) are
 * deactivated rather than returned.  This keeps us from handing out
 * potentially busy cached pages.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = _vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			       m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
	/* not reached */
}

/*
 * Find a free or zero page, with specified preference.  The nominal
 * case is inlined here; _vm_page_list_find() falls back to a scan of
 * nearby colors if the preferred queue is empty.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = _vm_page_list_find(
		PQ_FREE,
		(pindex + object->pg_color) & PQ_L2_MASK,
		prefer_zero
	);
	return(m);
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 *	page_req classes:
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 * The returned page will be marked PG_BUSY.
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));
	KKASSERT(page_req &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * Certain system threads (pageout daemon, buf_daemons) are
	 * allowed to eat deeper into the free page list.
	 */
	if (curthread->td_flags & TDF_SYSTHREAD)
		page_req |= VM_ALLOC_SYSTEM;

	crit_enter();
loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			kprintf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(object, pindex);
		}
#else
		m = vm_page_select_cache(object, pindex);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
			    ("Found dirty cache page %p", m));
			vm_page_busy(m);
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
		crit_exit();
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0)
			kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		crit_exit();
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * Good page found.  The page has not yet been busied.  We are in
	 * a critical section.
	 */
	KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue"));
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
	 * the page PG_BUSY
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;

	/*
	 * vm_page_insert() is safe prior to the crit_exit().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	crit_exit();

	/*
	 * A PG_BUSY page is returned.
	 */
	return (m);
}
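
/*
 * Illustrative usage (a sketch, not code from this file): allocate a
 * page for an object, sleeping until memory becomes available if the
 * allocation fails, then release PG_BUSY once the page is set up.
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *				  VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL)
 *		vm_wait(0);
 *	... initialize/validate the page ...
 *	vm_page_wakeup(m);
 */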

/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(int timo)
{
	crit_enter();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
	}
	crit_exit();
}

/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 *
 * Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 * processes will be able to grab memory first.  Do not change
 * this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	crit_enter();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	crit_exit();
}

/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	crit_enter();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
					    m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
	crit_exit();
}

/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >=
	    vmstats.v_pageout_free_min
	) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark, and wakeup the scheduler process if we have
	 * lots of memory (this process will swapin processes).
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}

/*
 * vm_page_free_toq:
 *
 * Returns the given page to the PQ_FREE list, disassociating it from
 * any VM object.
 *
 * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 * return (the page will have been freed).  No particular spl is required
 * on entry.
 *
 * This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	struct vpgqueues *pq;

	crit_enter();
	mycpu->gd_cnt.v_tfree++;

	KKASSERT((m->flags & PG_MAPPED) == 0);

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		kprintf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * Unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * No further management of fictitious pages occurs beyond object
	 * and queue removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		vm_page_wakeup(m);
		crit_exit();
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
		    panic(
			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
	    m->flags &= ~PG_UNMANAGED;
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages at the end (where we look for zero'd pages
	 * first) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	vm_page_wakeup(m);
	vm_page_free_wakeup();
	crit_exit();
}

/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 */
void
vm_page_unmanage(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
}

/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).  Don't do anything with fictitious
	 * pages because they are always wired.
	 */
	crit_enter();
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if (m->wire_count == 0) {
			if ((m->flags & PG_UNMANAGED) == 0)
				vm_page_unqueue(m);
			vmstats.v_wire_count++;
		}
		m->wire_count++;
		KASSERT(m->wire_count != 0,
			("vm_page_wire: wire_count overflow m=%p", m));
	}
	crit_exit();
}

/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * Dirty pages in the cache are not allowed.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	crit_enter();
	if (m->flags & PG_FICTITIOUS) {
		/* do nothing */
	} else if (m->wire_count <= 0) {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	} else {
		if (--m->wire_count == 0) {
			--vmstats.v_wire_count;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	}
	crit_exit();
}

/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
}

void
vm_page_deactivate(vm_page_t m)
{
	crit_enter();
	_vm_page_deactivate(m, 0);
	crit_exit();
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_cache(m);
	crit_exit();
	return(1);
}

/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	crit_exit();
	return(1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			m->wire_count || m->hold_count) {
		kprintf("vm_page_cache: attempting to cache busy/held page\n");
		return;
	}

	/*
	 * Already in the cache (and thus not mapped)
	 */
	if ((m->queue - m->pc) == PQ_CACHE) {
		KKASSERT((m->flags & PG_MAPPED) == 0);
		return;
	}

	/*
	 * Caller is required to test m->dirty, but note that the act of
	 * removing the page from its maps can cause it to become dirty
	 * on an SMP system due to another cpu running in usermode.
	 */
	if (m->dirty) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
			(long)m->pindex);
	}

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.  Our vm_page_protect() call may
	 * have blocked (especially w/ VM_PROT_NONE), so recheck
	 * everything.
	 */
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_wakeup(m);
	if ((m->flags & (PG_BUSY|PG_UNMANAGED|PG_MAPPED)) || m->busy ||
			m->wire_count || m->hold_count) {
		/* do nothing */
	} else if (m->dirty) {
		vm_page_deactivate(m);
	} else {
		vm_page_unqueue_nowakeup(m);
		m->queue = PQ_CACHE + m->pc;
		vm_page_queues[m->queue].lcnt++;
		TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
		vmstats.v_cache_count++;
		vm_page_free_wakeup();
	}
}

/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * Occasionally leave the page alone.
	 */
	crit_enter();
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		crit_exit();
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
	crit_exit();
}
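
/*
 * Where the 1/32, 3/32 and 28/32 weightings above come from: dnweight
 * increments once per call, so over any 512 consecutive calls
 * (dnw & 0x01F0) == 0 holds for the 16 values whose bits 4-8 are all
 * clear, i.e. 16/512 = 1/32 of calls leave the page alone.  Similarly
 * (dnw & 0x0070) == 0 holds 1/8 of the time (4/32); since those calls
 * include the 1/32 already handled, 3/32 of calls deactivate a clean
 * page and the remaining 28/32 'cache' it via the head of the
 * inactive queue.
 */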

/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busied page is returned, or NULL if VM_ALLOC_RETRY is not specified
 * and the routine had to block.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int generation;

	KKASSERT(allocflags &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
	crit_enter();
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			while ((object->generation == generation) &&
					(m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					m = NULL;
					goto done;
				}
			}
			goto retrylookup;
		} else {
			vm_page_busy(m);
			goto done;
		}
	}
	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		vm_wait(0);
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			goto done;
		goto retrylookup;
	}
done:
	crit_exit();
	return(m);
}
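
/*
 * Illustrative usage (a sketch, not code from this file): with
 * VM_ALLOC_RETRY the routine sleeps as needed and never returns NULL,
 * so the caller is guaranteed a busied page.
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	if (m->valid != VM_PAGE_BITS_ALL) {
 *		... zero or read in the page contents ...
 *	}
 *	vm_page_wakeup(m);	(releases PG_BUSY)
 */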

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
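
/*
 * Worked example (illustrative; assumes DEV_BSIZE = 512, so
 * DEV_BSHIFT = 9 and a 4096-byte page has 8 valid/dirty bits):
 *
 *	vm_page_bits(0, 1024):	  first_bit = 0, last_bit = 1023 >> 9 = 1,
 *				  (2 << 1) - (1 << 0) = 4 - 1 = 0x03
 *	vm_page_bits(512, 512):	  first_bit = 1, last_bit = 1,
 *				  (2 << 1) - (1 << 1) = 4 - 2 = 0x02
 *
 * i.e. one bit per DEV_BSIZE chunk touched by [base, base+size).
 */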

/*
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
		    size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}
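
/*
 * Alignment example for the zeroing above (illustrative; assumes
 * DEV_BSIZE = 512).  For vm_page_set_validclean(m, 100, 1000):
 *
 *	base = 100:    frag = 100 & ~511 = 0, so bytes [0, 100) of the
 *		       first block are zero'd if its valid bit is clear.
 *	endoff = 1100: frag = 1100 & ~511 = 1024, so bytes [1100, 1536)
 *		       of the last block are zero'd if its valid bit is
 *		       clear.
 *
 * vm_page_bits(100, 1000) then marks blocks 0-2 valid and clean.
 */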

/*
 * Clear the dirty bits covering the DEV_BSIZE'd chunks in
 * [base, base + size).
 */
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * Make the page all-dirty.
 *
 * Also make sure the related object and vnode reflect the fact that the
 * object may now contain a dirty page.
 */
void
vm_page_dirty(vm_page_t m)
{
#ifdef INVARIANTS
	int pqtype = m->queue - m->pc;
#endif
	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
		("vm_page_dirty: page in free/cache queue!"));
	if (m->dirty != VM_PAGE_BITS_ALL) {
		m->dirty = VM_PAGE_BITS_ALL;
		if (m->object)
			vm_object_set_writeable_dirty(m->object);
	}
}

/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 *
 * May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zero'd.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zero'd by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}
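
/*
 * Example of the scan above (illustrative; assumes PAGE_SIZE = 4096
 * and DEV_BSIZE = 512, i.e. 8 valid bits).  For m->valid = 0x07 (a
 * 1536-byte file tail mapped into the page), the scan finds the
 * invalid run covering bits 3-7 and zeroes bytes [0x600, 0x1000).
 */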

/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
 * May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * Update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

/*
 * Issue an event on a VM page.  Corresponding action structures are
 * removed from the page's list and called.
 */
void
vm_page_event_internal(vm_page_t m, vm_page_event_t event)
{
	struct vm_page_action *scan, *next;

	LIST_FOREACH_MUTABLE(scan, &m->action_list, entry, next) {
		if (scan->event == event) {
			scan->event = VMEVENT_NONE;
			LIST_REMOVE(scan, entry);
			scan->func(m, scan);
		}
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		vm_page_queues[PQ_ACTIVE].lcnt,
		vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */