/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.30 2005/06/02 20:57:21 swildner Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static struct vm_page **vm_page_buckets; /* Array of buckets */
static volatile int vm_page_bucket_generation;
struct vpgqueues vm_page_queues[PQ_COUNT]; /* Array of tailq lists */

#define ASSERT_IN_CRIT_SECTION()	KKASSERT(crit_test(curthread));

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;

	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;

	vm_page_queues[m->queue].lcnt++;
	return (m);
}

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and for the object/offset-to-page
 * hash table headers.  Each page cell is initialized and placed on the
 * free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;

	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the average chain length.  With singly-linked chains a 2x
	 * table is about the same total size as the 1x TAILQ-based table
	 * used previously, but the chains are half as long.
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
 * This routine may not block.
 *
 * We try to randomize the hash based on the object to spread the pages
 * out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}
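
/*
 * A worked example of the hash (an illustrative sketch, not code that
 * is compiled in): suppose vm_page_bucket_count settled at 8192, so
 * vm_page_hash_mask is 8191 (0x1fff).  For a hypothetical object at
 * kernel address 0xc1234000 with hash_rand 0x5a5a and pindex 10:
 *
 *	i     = (0xc1234000 + 10) ^ 0x5a5a;
 *	index = i & 0x1fff;		(always in the range 0..8191)
 *
 * Because the mask is bucket_count - 1 and bucket_count is a power of
 * 2, the AND is a cheap modulo and every result is a valid index.
 */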

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
		vm_page_busy(mem);
		vm_page_free_toq(mem);
	}
}
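
/*
 * Typical hold/unhold pairing (an illustrative sketch of assumed
 * caller code, not part of this file): a hold keeps the page from
 * being freed out from under a short, non-blocking operation.
 *
 *	crit_enter();
 *	vm_page_hold(m);		increments m->hold_count
 *	...short non-blocking use of the page...
 *	vm_page_unhold(m);		may complete a deferred free
 *	crit_exit();
 *
 * If the page was freed while held it sits on PQ_HOLD, and the final
 * vm_page_unhold() busies it and finishes the free via
 * vm_page_free_toq().
 */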

/*
 * Inserts the given mem entry into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	ASSERT_IN_CRIT_SECTION();
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */
	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 * Removes the given vm_page_t from the global (object,index) hash table
 * and from the object's memq.
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.  No spl needs to be
 * held on call to this routine.
 *
 * note: FreeBSD side effect was to unbusy the page on return.  We leave
 * it busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	struct vm_page **bucket;

	crit_enter();
	if (m->object == NULL) {
		crit_exit();
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	/*
	 * Remove from the object/offset hash table.  The page must be
	 * on its hash chain; we will panic if it isn't.
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */
	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	while (*bucket != m) {
		if (*bucket == NULL)
		    panic("vm_page_remove(): page not found in hash");
		bucket = &(*bucket)->hnext;
	}
	*bucket = m->hnext;
	m->hnext = NULL;
	vm_page_bucket_generation++;

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
	crit_exit();
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * This routine will operate properly without spl protection, but
 * the returned page could be in flux if it is busy.  Because an
 * interrupt can race a caller's busy check (unbusying and freeing the
 * page we return before the caller is able to check the busy bit),
 * the caller should generally call this routine with a critical
 * section held.
 *
 * Callers may call this routine without spl protection if they know
 * 'for sure' that the page will not be ripped out from under them
 * by an interrupt.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
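
/*
 * Typical lookup usage (an illustrative sketch, not part of this
 * file): callers that need a stable page perform the lookup and the
 * busy check inside a critical section so an interrupt cannot free
 * the page between the two steps.
 *
 *	crit_enter();
 *	m = vm_page_lookup(object, pindex);
 *	if (m && !m->busy && (m->flags & PG_BUSY) == 0) {
 *		vm_page_busy(m);
 *		...operate on the page...
 *		vm_page_wakeup(m);
 *	}
 *	crit_exit();
 */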

/*
 * vm_page_rename()
 *
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine will raise itself to splvm(), the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons:  (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *	 swap.  If the page is on the cache, we have to deactivate it
 *	 or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	 on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	crit_enter();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	vm_page_wakeup(m);
	crit_exit();
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain busied by the
 * caller.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_page_unqueue() - Remove a page from its queue, waking up the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	if (prefer_zero)
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	else
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	if (m == NULL)
		m = _vm_page_list_find2(basequeue, index);
	return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */

	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}

vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = _vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			       m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
	/* not reached */
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_select_free()
 * otherwise.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = _vm_page_list_find(
		PQ_FREE,
		(pindex + object->pg_color) & PQ_L2_MASK,
		prefer_zero
	);
	return(m);
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 *	page_req classes:
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 * The returned page will be marked PG_BUSY
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));
	KKASSERT(page_req &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if (curthread == pagethread)
		page_req |= VM_ALLOC_SYSTEM;

	crit_enter();
loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			printf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(object, pindex);
		}
#else
		m = vm_page_select_cache(object, pindex);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
			    ("Found dirty cache page %p", m));
			vm_page_busy(m);
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
		crit_exit();
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0)
			printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		crit_exit();
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * Good page found.  The page has not yet been busied.  We are in
	 * a critical section.
	 */
	KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
	 * the page PG_BUSY
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * vm_page_insert() is safe prior to the crit_exit().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	crit_exit();

	/*
	 * A PG_BUSY page is returned.
	 */
	return (m);
}
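
/*
 * The canonical allocation loop (an illustrative sketch of assumed
 * caller code, not part of this file): vm_page_alloc() never blocks,
 * so callers that may sleep retry around vm_wait() until a page
 * becomes available.
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_alloc(obj, idx, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait();
 *	...the page is returned PG_BUSY; vm_page_wakeup(m) when done...
 *
 * Interrupt-time code uses VM_ALLOC_INTERRUPT instead and must cope
 * with a NULL return, since it cannot vm_wait().
 */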

/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(void)
{
	crit_enter();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
	}
	crit_exit();
}

/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 *
 * Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 * processes will be able to grab memory first.  Do not change
 * this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	crit_enter();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	crit_exit();
}

/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	crit_enter();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
					    m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
	crit_exit();
}

/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >=
	    vmstats.v_pageout_free_min
	) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark, and wakeup the scheduler process if we have
	 * lots of memory; the scheduler process will swap in processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list, disassociating it from
 *	any VM object.
 *
 *	The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 *	return (the page will have been freed).  No particular spl is required
 *	on entry.
 *
 *	This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	struct vpgqueues *pq;

	crit_enter();
	mycpu->gd_cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * No further management of fictitious pages occurs beyond object
	 * and queue removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		vm_page_wakeup(m);
		crit_exit();
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
		    panic(
			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
	    m->flags &= ~PG_UNMANAGED;
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages at the tail (where we look for zero'd pages
	 * first) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	vm_page_wakeup(m);
	vm_page_free_wakeup();
	crit_exit();
}

/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 */
void
vm_page_unmanage(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
}

/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).  Don't do anything with fictitious
	 * pages because they are always wired.
	 */
	crit_enter();
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if (m->wire_count == 0) {
			if ((m->flags & PG_UNMANAGED) == 0)
				vm_page_unqueue(m);
			vmstats.v_wire_count++;
		}
		m->wire_count++;
		KASSERT(m->wire_count != 0,
		    ("vm_page_wire: wire_count overflow m=%p", m));
	}
	vm_page_flag_set(m, PG_MAPPED);
	crit_exit();
}

/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * Dirty pages in the cache are not allowed.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	crit_enter();
	if (m->flags & PG_FICTITIOUS) {
		/* do nothing */
	} else if (m->wire_count <= 0) {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	} else {
		if (--m->wire_count == 0) {
			--vmstats.v_wire_count;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	}
	crit_exit();
}
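
/*
 * Typical wire/unwire pairing (an illustrative sketch, not part of
 * this file): wire the page while I/O or a kernel mapping references
 * it, then unwire with a hint about which queue it should land on.
 *
 *	vm_page_wire(m);		page leaves the paging queues
 *	...page is pinned; the pageout daemon will not touch it...
 *	vm_page_unwire(m, 1);		1 = requeue active, 0 = inactive
 *
 * Pass 1 (activate) when the page was just dirtied or is known hot;
 * the page is never unwired onto the cache queue.
 */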


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
}

void
vm_page_deactivate(vm_page_t m)
{
	crit_enter();
	_vm_page_deactivate(m, 0);
	crit_exit();
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_cache(m);
	crit_exit();
	return(1);
}

/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	crit_enter();
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		crit_exit();
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty) {
		crit_exit();
		return(0);
	}
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	crit_exit();
	return(1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	ASSERT_IN_CRIT_SECTION();

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			m->wire_count || m->hold_count) {
		printf("vm_page_cache: attempting to cache busy/held page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
			(long)m->pindex);
	}
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	vmstats.v_cache_count++;
	vm_page_free_wakeup();
}

/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * occasionally leave the page alone
	 */
	crit_enter();
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		crit_exit();
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
	crit_exit();
}
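
/*
 * The weighting above works out as follows (a derivation, not code):
 * (dnw & 0x01F0) == 0 holds for 16 of every 512 increments, so the
 * page is left alone 1 time in 32.  (dnw & 0x0070) == 0 holds for 64
 * of every 512; minus the 16 already taken that is 48, so a clean
 * page is deactivated 3 times in 32.  For the remaining 448 of 512,
 * or 28 times in 32, the page is pushed toward the cache by
 * deactivating it at the head of the inactive queue.
 */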

/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busy page is returned or NULL.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 * If VM_ALLOC_RETRY is not specified the call can fail and return NULL.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int generation;

	KKASSERT(allocflags &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
	crit_enter();
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			while ((object->generation == generation) &&
					(m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					m = NULL;
					goto done;
				}
			}
			goto retrylookup;
		} else {
			vm_page_busy(m);
			goto done;
		}
	}
	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		vm_wait();
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			goto done;
		goto retrylookup;
	}
done:
	crit_exit();
	return(m);
}
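
/*
 * Typical grab usage (an illustrative sketch, not part of this file):
 * with VM_ALLOC_RETRY the call cannot fail, at the cost of possibly
 * sleeping in vm_wait() or on the busy page.
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	...m is busied and associated with (object, pindex); it may be
 *	   only partially valid, so consult m->valid before use...
 *	vm_page_wakeup(m);
 */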

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
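
/*
 * A worked example (assuming DEV_BSIZE is 512, i.e. DEV_BSHIFT is 9):
 * vm_page_bits(512, 1024) covers the second and third 512-byte chunks
 * of the page:
 *
 *	first_bit = 512 >> 9                    = 1
 *	last_bit  = (512 + 1024 - 1) >> 9       = 2
 *	result    = (2 << 2) - (1 << 1) = 8 - 2 = 6 (binary 110)
 *
 * i.e. bits 1 and 2 are set, exactly the DEV_BSIZE chunks touched by
 * the range.
 */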

/*
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */

	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */

	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */

	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
		    size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 *
 * May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zero'd.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zero'd by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
 * May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;
	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		vm_page_queues[PQ_ACTIVE].lcnt,
		vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */