xref: /dflybsd-src/sys/vm/vm_page.c (revision 2d0700913d3c55b6181d2b703dd69aae2179ce8c)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/alist.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#include <machine/inttypes.h>
#include <machine/md_var.h>

#include <vm/vm_page2.h>
#include <sys/spinlock2.h>

#define VMACTION_HSIZE	256
#define VMACTION_HMASK	(VMACTION_HSIZE - 1)

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(u_short pg_color);
static vm_page_t _vm_page_list_find2(int basequeue, int index);
static void _vm_page_deactivate_locked(vm_page_t m, int athead);

/*
 * Array of tailq lists
 */
__cachealign struct vpgqueues vm_page_queues[PQ_COUNT];

LIST_HEAD(vm_page_action_list, vm_page_action);
struct vm_page_action_list	action_list[VMACTION_HSIZE];
static volatile int vm_pages_waiting;

static struct alist vm_contig_alist;
static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin);

static u_long vm_dma_reserved = 0;
TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
SYSCTL_ULONG(_vm, OID_AUTO, dma_reserved, CTLFLAG_RD, &vm_dma_reserved, 0,
	    "Memory reserved for DMA");
SYSCTL_UINT(_vm, OID_AUTO, dma_free_pages, CTLFLAG_RD,
	    &vm_contig_alist.bl_free, 0, "Free pages remaining in the DMA reserve");

static int vm_contig_verbose = 0;
TUNABLE_INT("vm.contig_verbose", &vm_contig_verbose);

RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
	     vm_pindex_t, pindex);

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_INACTIVE+i].cnt = &vmstats.v_inactive_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_ACTIVE+i].cnt = &vmstats.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_HOLD+i].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++) {
		TAILQ_INIT(&vm_page_queues[i].pl);
		spin_init(&vm_page_queues[i].spin);
	}

	for (i = 0; i < VMACTION_HSIZE; i++)
		LIST_INIT(&action_list[i]);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = NULL;
vm_paddr_t vm_low_phys_reserved;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Beware that the page zeroing daemon will also be running soon after
 * boot, moving pages from the head to the tail of the PQ_FREE queues.
 *
 * Must be called in a critical section.
 */
static void
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	/*
	 * Twist for cpu localization in addition to page coloring, so
	 * different cpus selecting by m->queue get different page colors.
	 */
	m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE) & PQ_L2_MASK;
	m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE)) & PQ_L2_MASK;
	/*
	 * Reserve a certain number of contiguous low memory pages for
	 * contigmalloc() to use.
	 */
	if (pa < vm_low_phys_reserved) {
		atomic_add_int(&vmstats.v_page_count, 1);
		atomic_add_int(&vmstats.v_dma_pages, 1);
		m->queue = PQ_NONE;
		m->wire_count = 1;
		atomic_add_int(&vmstats.v_wire_count, 1);
		alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1);
		return;
	}

	/*
	 * General page
	 */
	m->queue = m->pc + PQ_FREE;
	KKASSERT(m->dirty == 0);

	atomic_add_int(&vmstats.v_page_count, 1);
	atomic_add_int(&vmstats.v_free_count, 1);
	vpq = &vm_page_queues[m->queue];
	if ((vpq->flipflop & 15) == 0) {
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->flags |= PG_ZERO;
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
		atomic_add_int(&vm_page_zero_count, 1);
	} else {
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	}
	++vpq->flipflop;
	++vpq->lcnt;
}
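
/*
 * Illustrative sketch (not compiled into the kernel): the page-color
 * twist above, reduced to a stand-alone userspace program.  The
 * EX_PAGE_SHIFT and EX_PQ_L2_SIZE values are assumptions for
 * illustration only; the kernel's actual values come from its headers.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PQ_L2_SIZE	256
#define EX_PQ_L2_MASK	(EX_PQ_L2_SIZE - 1)

static unsigned
example_page_color(unsigned long long pa)
{
	unsigned pc;

	/* base color from the physical page number */
	pc = (pa >> EX_PAGE_SHIFT) & EX_PQ_L2_MASK;
	/* twist in higher-order page number bits for cpu localization */
	pc ^= ((pa >> EX_PAGE_SHIFT) / EX_PQ_L2_SIZE) & EX_PQ_L2_MASK;
	pc ^= ((pa >> EX_PAGE_SHIFT) /
	       (EX_PQ_L2_SIZE * EX_PQ_L2_SIZE)) & EX_PQ_L2_MASK;
	return (pc);
}

int
main(void)
{
	unsigned long long pa;

	/* consecutive physical pages land on consecutive colors */
	for (pa = 0; pa < (8ULL << EX_PAGE_SHIFT); pa += 1ULL << EX_PAGE_SHIFT)
		printf("pa %#llx -> color %u\n", pa, example_page_color(pa));
	return (0);
}
#endif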

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Preallocates memory for critical VM structures and arrays prior to
 * kernel_map becoming available.
 *
 * Memory is allocated from (virtual2_start, virtual2_end) if available,
 * otherwise memory is allocated from (virtual_start, virtual_end).
 *
 * On x86-64 (virtual_start, virtual_end) is only 2GB and may not be
 * large enough to hold vm_page_array & other structures for machines with
 * large amounts of ram, so we want to use virtual2* when available.
 */
void
vm_page_startup(void)
{
	vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start;
	vm_offset_t mapped;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;
	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page64(phys_avail[i]);
		phys_avail[i + 1] = trunc_page64(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];
	end = trunc_page(end);

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

#if !defined(_KERNEL_VIRTUAL)
	/*
	 * VKERNELs don't support minidumps and as such don't need
	 * vm_page_dump
	 *
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	end -= vm_page_dump_size;
	vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

#ifndef _KERNEL_VIRTUAL
	/*
	 * (only applies to real kernels)
	 *
	 * Initialize the contiguous reserve map.  We initially reserve up
	 * to 1/4 available physical memory or 65536 pages (~256MB), whichever
	 * is lower.
	 *
	 * Once device initialization is complete we return most of the
	 * reserved memory back to the normal page queues but leave some
	 * in reserve for things like usb attachments.
	 */
	vm_low_phys_reserved = (vm_paddr_t)65536 << PAGE_SHIFT;
	if (vm_low_phys_reserved > total / 4)
		vm_low_phys_reserved = total / 4;
	if (vm_dma_reserved == 0) {
		vm_dma_reserved = 16 * 1024 * 1024;	/* 16MB */
		if (vm_dma_reserved > total / 16)
			vm_dma_reserved = total / 16;
	}
#endif
	alist_init(&vm_contig_alist, 65536, vm_contig_ameta,
		   ALIST_RECORDS_65536);

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t)mapped;

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	/*
	 * since pmap_map on amd64 returns stuff out of a direct-map region,
	 * we have to manually add these pages to the minidump tracking so
	 * that they can be dumped, including the vm_page_array.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	if (virtual2_start)
		virtual2_start = vaddr;
	else
		virtual_start = vaddr;
}
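
/*
 * Illustrative sketch (not compiled into the kernel): the phys_avail[]
 * convention consumed above.  Entries come in (start, end) pairs
 * terminated by a zero end; the startup code page-aligns each pair and
 * tracks the largest segment.  The sample memory map is hypothetical.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL
#define ex_round_page(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))
#define ex_trunc_page(x) ((x) & ~(EX_PAGE_SIZE - 1))

int
main(void)
{
	/* hypothetical two-segment memory map, zero-terminated */
	unsigned long long phys_avail[] = {
		0x00001000, 0x0009f000,
		0x00100000, 0x3fff0000,
		0, 0
	};
	unsigned long long total = 0, biggestsize = 0;
	int i, biggestone = 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		unsigned long long size;

		phys_avail[i] = ex_round_page(phys_avail[i]);
		phys_avail[i + 1] = ex_trunc_page(phys_avail[i + 1]);
		size = phys_avail[i + 1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		total += size;
	}
	printf("total %lluK, biggest segment at index %d (%lluK)\n",
	       total / 1024, biggestone, biggestsize / 1024);
	return (0);
}
#endif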

/*
 * We reserved a generous amount of memory for contigmalloc().  Now that
 * most drivers have initialized we want to return most of the remaining
 * free reserve back to the VM page queues so it can be used for normal
 * allocations.
 *
 * We leave vm_dma_reserved bytes worth of free pages in the reserve pool.
 */
static void
vm_page_startup_finish(void *dummy __unused)
{
	alist_blk_t blk;
	alist_blk_t rblk;
	alist_blk_t count;
	alist_blk_t xcount;
	alist_blk_t bfree;
	vm_page_t m;

	spin_lock(&vm_contig_spin);
	for (;;) {
		bfree = alist_free_info(&vm_contig_alist, &blk, &count);
		if (bfree <= vm_dma_reserved / PAGE_SIZE)
			break;
		if (count == 0)
			break;

		/*
		 * Figure out how much of the initial reserve we have to
		 * free in order to reach our target.
		 */
		bfree -= vm_dma_reserved / PAGE_SIZE;
		if (count > bfree) {
			blk += count - bfree;
			count = bfree;
		}

		/*
		 * Calculate the nearest power of 2 <= count.
		 */
		for (xcount = 1; xcount <= count; xcount <<= 1)
			;
		xcount >>= 1;
		blk += count - xcount;
		count = xcount;

		/*
		 * Allocate the pages from the alist, then free them to
		 * the normal VM page queues.
		 *
		 * Pages allocated from the alist are wired.  We have to
		 * busy, unwire, and free them.  We must also adjust
		 * vm_low_phys_reserved before freeing any pages to prevent
		 * confusion.
		 */
		rblk = alist_alloc(&vm_contig_alist, blk, count);
		if (rblk != blk) {
			kprintf("vm_page_startup_finish: Unable to return "
				"dma space @0x%08x/%d -> 0x%08x\n",
				blk, count, rblk);
			break;
		}
		atomic_add_int(&vmstats.v_dma_pages, -count);
		spin_unlock(&vm_contig_spin);

		m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
		vm_low_phys_reserved = VM_PAGE_TO_PHYS(m);
		while (count) {
			vm_page_busy_wait(m, FALSE, "cpgfr");
			vm_page_unwire(m, 0);
			vm_page_free(m);
			--count;
			++m;
		}
		spin_lock(&vm_contig_spin);
	}
	spin_unlock(&vm_contig_spin);

	/*
	 * Print out how much DMA space drivers have already allocated and
	 * how much is left over.
	 */
	kprintf("DMA space used: %jdk, remaining available: %jdk\n",
		(intmax_t)(vmstats.v_dma_pages - vm_contig_alist.bl_free) *
		(PAGE_SIZE / 1024),
		(intmax_t)vm_contig_alist.bl_free * (PAGE_SIZE / 1024));
}
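
/*
 * Illustrative sketch (not compiled into the kernel): the power-of-2
 * truncation loop used above.  alist allocations must be power-of-2
 * sized, so the loop finds the largest power of 2 <= count, and the
 * caller frees from the top of the range (blk += count - xcount) so
 * the remainder stays contiguous.
 */
#if 0
#include <stdio.h>

static unsigned
ex_pow2_floor(unsigned count)
{
	unsigned xcount;

	for (xcount = 1; xcount <= count; xcount <<= 1)
		;
	return (xcount >> 1);
}

int
main(void)
{
	unsigned v[] = { 1, 5, 8, 1000 };
	int i;

	for (i = 0; i < 4; ++i)
		printf("%u -> %u\n", v[i], ex_pow2_floor(v[i]));
	/* prints 1 -> 1, 5 -> 4, 8 -> 8, 1000 -> 512 */
	return (0);
}
#endif
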
SYSINIT(vm_pgend, SI_SUB_PROC0_POST, SI_ORDER_ANY,
	vm_page_startup_finish, NULL)

/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex < info->start_pindex)
		return(-1);
	if (p->pindex > info->end_pindex)
		return(1);
	return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
	if (p1->pindex < p2->pindex)
		return(-1);
	if (p1->pindex > p2->pindex)
		return(1);
	return(0);
}

/*
 * Each page queue has its own spin lock, which is fairly optimal for
 * allocating and freeing pages at least.
 *
 * The caller must hold the vm_page_spin_lock() before locking a vm_page's
 * queue spinlock via this function.  Also note that m->queue cannot change
 * unless both the page and queue are locked.
 */
static __inline
void
_vm_page_queue_spin_lock(vm_page_t m)
{
	u_short queue;

	queue = m->queue;
	if (queue != PQ_NONE) {
		spin_lock(&vm_page_queues[queue].spin);
		KKASSERT(queue == m->queue);
	}
}

static __inline
void
_vm_page_queue_spin_unlock(vm_page_t m)
{
	u_short queue;

	queue = m->queue;
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_unlock(&vm_page_queues[queue].spin);
}

static __inline
void
_vm_page_queues_spin_lock(u_short queue)
{
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_lock(&vm_page_queues[queue].spin);
}

static __inline
void
_vm_page_queues_spin_unlock(u_short queue)
{
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_unlock(&vm_page_queues[queue].spin);
}

void
vm_page_queue_spin_lock(vm_page_t m)
{
	_vm_page_queue_spin_lock(m);
}

void
vm_page_queues_spin_lock(u_short queue)
{
	_vm_page_queues_spin_lock(queue);
}

void
vm_page_queue_spin_unlock(vm_page_t m)
{
	_vm_page_queue_spin_unlock(m);
}

void
vm_page_queues_spin_unlock(u_short queue)
{
	_vm_page_queues_spin_unlock(queue);
}

/*
 * This locks the specified vm_page and its queue in the proper order
 * (page first, then queue).  The queue may change so the caller must
 * recheck on return.
 */
static __inline
void
_vm_page_and_queue_spin_lock(vm_page_t m)
{
	vm_page_spin_lock(m);
	_vm_page_queue_spin_lock(m);
}

static __inline
void
_vm_page_and_queue_spin_unlock(vm_page_t m)
{
	_vm_page_queues_spin_unlock(m->queue);
	vm_page_spin_unlock(m);
}

void
vm_page_and_queue_spin_unlock(vm_page_t m)
{
	_vm_page_and_queue_spin_unlock(m);
}

void
vm_page_and_queue_spin_lock(vm_page_t m)
{
	_vm_page_and_queue_spin_lock(m);
}

/*
 * Helper function removes vm_page from its current queue.
 * Returns the base queue the page used to be on.
 *
 * The vm_page and the queue must be spinlocked.
 * This function will unlock the queue but leave the page spinlocked.
 */
static __inline u_short
_vm_page_rem_queue_spinlocked(vm_page_t m)
{
	struct vpgqueues *pq;
	u_short queue;

	queue = m->queue;
	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		atomic_add_int(pq->cnt, -1);
		pq->lcnt--;
		m->queue = PQ_NONE;
		vm_page_queues_spin_unlock(queue);
		if ((queue - m->pc) == PQ_FREE && (m->flags & PG_ZERO))
			atomic_subtract_int(&vm_page_zero_count, 1);
		if ((queue - m->pc) == PQ_CACHE || (queue - m->pc) == PQ_FREE)
			return (queue - m->pc);
	}
	return queue;
}

/*
 * Helper function places the vm_page on the specified queue.
 *
 * The vm_page must be spinlocked.
 * This function will return with both the page and the queue locked.
 */
static __inline void
_vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
{
	struct vpgqueues *pq;

	KKASSERT(m->queue == PQ_NONE);

	if (queue != PQ_NONE) {
		vm_page_queues_spin_lock(queue);
		pq = &vm_page_queues[queue];
		++pq->lcnt;
		atomic_add_int(pq->cnt, 1);
		m->queue = queue;

		/*
		 * Put zero'd pages at the tail (where we look for zero'd
		 * pages first) and non-zero'd pages at the head.
		 */
		if (queue - m->pc == PQ_FREE) {
			if (m->flags & PG_ZERO) {
				TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
				atomic_add_int(&vm_page_zero_count, 1);
			} else {
				TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
			}
		} else if (athead) {
			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
		} else {
			TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		}
		/* leave the queue spinlocked */
	}
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until the page is also no longer soft-busied (PG_SBUSY).  Only one
 * sleep call will be made before returning.
 *
 * This function does NOT busy the page and on return the page is not
 * guaranteed to be available.
 */
void
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();

		if ((flags & PG_BUSY) == 0 &&
		    (also_m_busy == 0 || (flags & PG_SBUSY) == 0)) {
			break;
		}
		tsleep_interlock(m, 0);
		if (atomic_cmpset_int(&m->flags, flags,
				      flags | PG_WANTED | PG_REFERENCED)) {
			tsleep(m, PINTERLOCKED, msg, 0);
			break;
		}
	}
}
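
/*
 * Illustrative sketch (not compiled into the kernel): the flag
 * negotiation above, in stand-alone C11 form.  The compare-and-set
 * only succeeds if the flags did not change between the load and the
 * swap, so the WANTED bit is always published against a consistent
 * snapshot before we commit to sleeping.  tsleep_interlock()/tsleep()
 * have no direct userspace analogue; the printf stands in for the
 * sleep.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define EX_PG_BUSY	0x0001
#define EX_PG_WANTED	0x0002

static _Atomic unsigned ex_page_flags = EX_PG_BUSY;

static void
ex_sleep_busy(void)
{
	unsigned flags;

	for (;;) {
		flags = atomic_load(&ex_page_flags);
		if ((flags & EX_PG_BUSY) == 0)
			break;
		/* publish WANTED against the snapshot we inspected ... */
		if (atomic_compare_exchange_strong(&ex_page_flags, &flags,
						   flags | EX_PG_WANTED)) {
			/* ... then it is safe to block until woken */
			printf("would tsleep() here\n");
			break;
		}
		/* flags changed underneath us; retry */
	}
}

int
main(void)
{
	ex_sleep_busy();
	return (0);
}
#endif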

/*
 * Wait until PG_BUSY can be set, then set it.  If also_m_busy is TRUE we
 * also wait for the soft-busy (PG_SBUSY) to clear before setting PG_BUSY.
 */
void
VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
				     int also_m_busy, const char *msg
				     VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
			}
		} else if (also_m_busy && (flags & PG_SBUSY)) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
			}
		} else {
			if (atomic_cmpset_int(&m->flags, flags,
					      flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
				m->busy_func = func;
				m->busy_line = lineno;
#endif
				break;
			}
		}
	}
}

/*
 * Attempt to set PG_BUSY.  If also_m_busy is TRUE we only succeed if the
 * page is also not soft-busied (PG_SBUSY).
 *
 * Returns non-zero on failure.
 */
int
VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy
				    VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY)
			return TRUE;
		if (also_m_busy && (flags & PG_SBUSY))
			return TRUE;
		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			return FALSE;
		}
	}
}

/*
 * Clear the PG_BUSY flag and return non-zero to indicate to the caller
 * that a wakeup() should be performed.
 *
 * The vm_page must be spinlocked and will remain spinlocked on return.
 * The related queue must NOT be spinlocked (which could deadlock us).
 *
 * (inline version)
 */
static __inline
int
_vm_page_wakeup(vm_page_t m)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (atomic_cmpset_int(&m->flags, flags,
				      flags & ~(PG_BUSY | PG_WANTED))) {
			break;
		}
	}
	return(flags & PG_WANTED);
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving onto
 * other things.
 */
void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_spin_lock(m);
	if (_vm_page_wakeup(m)) {
		vm_page_spin_unlock(m);
		wakeup(m);
	} else {
		vm_page_spin_unlock(m);
	}
}

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 *
 * XXX get vm_page_spin_lock() here and move FREE->HOLD if necessary
 */
void
vm_page_hold(vm_page_t m)
{
	vm_page_spin_lock(m);
	atomic_add_int(&m->hold_count, 1);
	if (m->queue - m->pc == PQ_FREE) {
		_vm_page_queue_spin_lock(m);
		_vm_page_rem_queue_spinlocked(m);
		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
		_vm_page_queue_spin_unlock(m);
	}
	vm_page_spin_unlock(m);
}

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  If we are able to busy the page
 * after the hold count drops to zero we will move the page to the
 * appropriate PQ_FREE queue by calling vm_page_free_toq().
 */
void
vm_page_unhold(vm_page_t m)
{
	vm_page_spin_lock(m);
	atomic_add_int(&m->hold_count, -1);
	if (m->hold_count == 0 && m->queue - m->pc == PQ_HOLD) {
		_vm_page_queue_spin_lock(m);
		_vm_page_rem_queue_spinlocked(m);
		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
		_vm_page_queue_spin_unlock(m);
	}
	vm_page_spin_unlock(m);
}
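
/*
 * Typical hold/unhold usage, sketched as hedged pseudocode (not
 * compiled).  The hold keeps the page and its data from being reused
 * while we inspect it without busying it; the wait tag "pgrd" is a
 * hypothetical example.
 */
#if 0
	vm_page_hold(m);			/* page cannot be reused */
	vm_page_sleep_busy(m, FALSE, "pgrd");	/* wait out a PG_BUSY owner */
	/* ... examine page contents ... */
	vm_page_unhold(m);			/* may move PQ_HOLD -> PQ_FREE */
#endif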

/*
 * Inserts the given vm_page into the object and object list.
 *
 * The pagetables are not updated; the page will presumably be faulted
 * in as needed, or, for a kernel page, the caller will at some point
 * enter it into the kernel's pmap.  We are not allowed to block here,
 * so we couldn't update the pagetables anyway.
 *
 * This routine may not block.
 * This routine must be called with the vm_object held.
 * This routine must be called with a critical section held.
 *
 * This routine returns TRUE if the page was inserted into the object
 * successfully, and FALSE if the page already exists in the object.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(object));
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	object->generation++;

	/*
	 * Record the object/offset pair in this page and add the
	 * pv_list_count of the page to the object.
	 *
	 * The vm_page spin lock is required for interactions with the pmap.
	 */
	vm_page_spin_lock(m);
	m->object = object;
	m->pindex = pindex;
	if (vm_page_rb_tree_RB_INSERT(&object->rb_memq, m)) {
		m->object = NULL;
		m->pindex = 0;
		vm_page_spin_unlock(m);
		return FALSE;
	}
	object->resident_page_count++;
	/* atomic_add_int(&object->agg_pv_list_count, m->md.pv_list_count); */
	vm_page_spin_unlock(m);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if ((m->valid & m->dirty) ||
	    (m->flags & (PG_WRITEABLE | PG_NEED_COMMIT)))
		vm_object_set_writeable_dirty(object);

	/*
	 * Checks for a swap assignment and sets PG_SWAPPED if appropriate.
	 */
	swap_pager_page_inserted(m);
	return TRUE;
}
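
/*
 * Typical lookup-or-insert flow, sketched as hedged pseudocode (not
 * compiled).  The object token must be held exclusively across the
 * insert; VM_ALLOC_NULL_OK turns an insertion collision into a NULL
 * return instead of a panic.  The caller shown is hypothetical.
 */
#if 0
	vm_object_hold(object);
	m = vm_page_lookup(object, pindex);
	if (m == NULL) {
		m = vm_page_alloc(object, pindex,
				  VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
		/* NULL here means we raced a concurrent insert */
	}
	vm_object_drop(object);
#endif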

/*
 * Removes the given vm_page_t from the (object,index) table
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.
 * No other requirements.
 *
 * NOTE: FreeBSD side effect was to unbusy the page on return.  We leave
 *	 it busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL) {
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	vm_object_hold(object);

	/*
	 * Remove the page from the object and update the object.
	 *
	 * The vm_page spin lock is required for interactions with the pmap.
	 */
	vm_page_spin_lock(m);
	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
	object->resident_page_count--;
	/* atomic_add_int(&object->agg_pv_list_count, -m->md.pv_list_count); */
	m->object = NULL;
	vm_page_spin_unlock(m);

	object->generation++;

	vm_object_drop(object);
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * The caller must hold the vm_object token.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Search the hash table for this object/offset pair
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
	return(m);
}

vm_page_t
VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(struct vm_object *object,
					    vm_pindex_t pindex,
					    int also_m_busy, const char *msg
					    VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;
	vm_page_t m;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	while (m) {
		KKASSERT(m->object == object && m->pindex == pindex);
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
							      pindex);
			}
		} else if (also_m_busy && (flags & PG_SBUSY)) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
							      pindex);
			}
		} else if (atomic_cmpset_int(&m->flags, flags,
					     flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			break;
		}
	}
	return m;
}

/*
 * Attempt to lookup and busy a page.
 *
 * Returns NULL if the page could not be found
 *
 * Returns a vm_page and error == TRUE if the page exists but could not
 * be busied.
 *
 * Returns a vm_page and error == FALSE on success.
 */
vm_page_t
VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(struct vm_object *object,
					   vm_pindex_t pindex,
					   int also_m_busy, int *errorp
					   VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;
	vm_page_t m;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	*errorp = FALSE;
	while (m) {
		KKASSERT(m->object == object && m->pindex == pindex);
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			*errorp = TRUE;
			break;
		}
		if (also_m_busy && (flags & PG_SBUSY)) {
			*errorp = TRUE;
			break;
		}
		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			break;
		}
	}
	return m;
}

/*
 * Caller must hold the related vm_object
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	next = vm_page_rb_tree_RB_NEXT(m);
	if (next && next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * vm_page_rename()
 *
 * Move the given vm_page from its current object to the specified
 * target object/offset.  The page must be busy and will remain so
 * on return.
 *
 * new_object must be held.
 * This routine might block. XXX ?
 *
 * NOTE: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons:  (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * NOTE: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *	 swap.  If the page is on the cache, we have to deactivate it
 *	 or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	 on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	KKASSERT(m->flags & PG_BUSY);
	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(new_object));
	if (m->object) {
		ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(m->object));
		vm_page_remove(m);
	}
	if (vm_page_insert(m, new_object, new_pindex) == FALSE) {
		panic("vm_page_rename: target exists (%p,%"PRIu64")",
		      new_object, new_pindex);
	}
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
}
/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain busied by the
 * caller.
 *
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	vm_page_and_queue_spin_lock(m);
	(void)_vm_page_rem_queue_spinlocked(m);
	vm_page_spin_unlock(m);
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	u_short queue;

	vm_page_and_queue_spin_lock(m);
	queue = _vm_page_rem_queue_spinlocked(m);
	if (queue == PQ_FREE || queue == PQ_CACHE) {
		vm_page_spin_unlock(m);
		pagedaemon_wakeup();
	} else {
		vm_page_spin_unlock(m);
	}
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * On MP systems each PQ_FREE and PQ_CACHE color queue has its own spinlock
 * and the algorithm is adjusted to localize allocations on a per-core basis.
 * This is done by 'twisting' the colors.
 *
 * The page is returned spinlocked and removed from its queue (it will
 * be on PQ_NONE), or NULL. The page is not PG_BUSY'd.  The caller
 * is responsible for dealing with the busy-page case (usually by
 * deactivating the page and looping).
 *
 * NOTE:  This routine is carefully inlined.  A non-inlined version
 *	  is available for outside callers but the only critical path is
 *	  from within this source file.
 *
 * NOTE:  This routine assumes that the vm_pages found in PQ_CACHE and PQ_FREE
 *	  represent stable storage, allowing us to order our locks vm_page
 *	  first, then queue.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	for (;;) {
		if (prefer_zero) {
			m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl,
				       pglist);
		} else {
			m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
		}
		if (m == NULL) {
			m = _vm_page_list_find2(basequeue, index);
			return(m);
		}
		vm_page_and_queue_spin_lock(m);
		if (m->queue == basequeue + index) {
			_vm_page_rem_queue_spinlocked(m);
			/* vm_page_t spin held, no queue spin */
			break;
		}
		vm_page_and_queue_spin_unlock(m);
	}
	return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		for (;;) {
			m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl);
			if (m) {
				_vm_page_and_queue_spin_lock(m);
				if (m->queue ==
				    basequeue + ((index + i) & PQ_L2_MASK)) {
					_vm_page_rem_queue_spinlocked(m);
					return(m);
				}
				_vm_page_and_queue_spin_unlock(m);
				continue;
			}
			m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl);
			if (m) {
				_vm_page_and_queue_spin_lock(m);
				if (m->queue ==
				    basequeue + ((index - i) & PQ_L2_MASK)) {
					_vm_page_rem_queue_spinlocked(m);
					return(m);
				}
				_vm_page_and_queue_spin_unlock(m);
				continue;
			}
			break;	/* next i */
		}
	}
	return(m);
}
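
/*
 * Illustrative sketch (not compiled into the kernel): the outward
 * color search above, reduced to its index arithmetic.  For
 * i = N/2 .. 1 the probe visits (index + i) and (index - i) mod N,
 * covering all the other color queues while preferring colors close
 * to the requested one.  N is a small stand-in for PQ_L2_SIZE and,
 * like the kernel code, the masking relies on two's-complement
 * behavior for the negative offsets.
 */
#if 0
#include <stdio.h>

#define EX_N	8			/* assumed small PQ_L2_SIZE */

int
main(void)
{
	int index = 3;
	int i;

	for (i = EX_N / 2; i > 0; --i) {
		printf("probe color %d then %d\n",
		       (index + i) & (EX_N - 1), (index - i) & (EX_N - 1));
	}
	return (0);
}
#endif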

/*
 * Returns a vm_page candidate for allocation.  The page is not busied so
 * it can move around.  The caller must busy the page (and typically
 * deactivate it if it cannot be busied!)
 *
 * Returns a spinlocked vm_page that has been removed from its queue.
 */
vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization, remove it
 * from the queue, and busy it.  The returned page will not be spinlocked.
 *
 * Candidates can fail due to being busied by someone else; failed
 * candidates are deactivated before retrying.
 *
 * This routine may not block.
 */
static vm_page_t
vm_page_select_cache(u_short pg_color)
{
	vm_page_t m;

	for (;;) {
		m = _vm_page_list_find(PQ_CACHE, pg_color & PQ_L2_MASK, FALSE);
		if (m == NULL)
			break;
		/*
		 * (m) has been removed from its queue and spinlocked
		 */
		if (vm_page_busy_try(m, TRUE)) {
			_vm_page_deactivate_locked(m, 0);
			vm_page_spin_unlock(m);
#ifdef INVARIANTS
			kprintf("Warning: busy page %p found in cache\n", m);
#endif
		} else {
			/*
			 * We successfully busied the page
			 */
			if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0 &&
			    m->hold_count == 0 &&
			    m->wire_count == 0 &&
			    (m->dirty & m->valid) == 0) {
				vm_page_spin_unlock(m);
				pagedaemon_wakeup();
				return(m);
			}

			/*
			 * The page cannot be recycled, deactivate it.
			 */
			_vm_page_deactivate_locked(m, 0);
			if (_vm_page_wakeup(m)) {
				vm_page_spin_unlock(m);
				wakeup(m);
			} else {
				vm_page_spin_unlock(m);
			}
		}
	}
	return (m);
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_select_free()
 * otherwise.  A busied page is removed from the queue and returned.
 *
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(u_short pg_color, boolean_t prefer_zero)
{
	vm_page_t m;

	for (;;) {
		m = _vm_page_list_find(PQ_FREE, pg_color & PQ_L2_MASK,
				       prefer_zero);
		if (m == NULL)
			break;
		if (vm_page_busy_try(m, TRUE)) {
			/*
			 * Various mechanisms such as a pmap_collect can
			 * result in a busy page on the free queue.  We
			 * have to move the page out of the way so we can
			 * retry the allocation.  If the other thread is not
			 * allocating the page then m->valid will remain 0 and
			 * the pageout daemon will free the page later on.
			 *
			 * Since we could not busy the page, however, we
			 * cannot make assumptions as to whether the page
			 * will be allocated by the other thread or not,
			 * so all we can do is deactivate it to move it out
			 * of the way.  In particular, if the other thread
			 * wires the page it may wind up on the inactive
			 * queue and the pageout daemon will have to deal
			 * with that case too.
			 */
			_vm_page_deactivate_locked(m, 0);
			vm_page_spin_unlock(m);
#ifdef INVARIANTS
			kprintf("Warning: busy page %p found on free queue\n",
				m);
#endif
		} else {
			/*
			 * Theoretically if we are able to busy the page
			 * atomic with the queue removal (using the vm_page
			 * lock) nobody else should be able to mess with the
			 * page before us.
			 */
			KKASSERT((m->flags & (PG_UNMANAGED |
					      PG_NEED_COMMIT)) == 0);
			KKASSERT(m->hold_count == 0);
			KKASSERT(m->wire_count == 0);
			vm_page_spin_unlock(m);
			pagedaemon_wakeup();

			/* return busied and removed page */
			return(m);
		}
	}
	return(m);
}

/*
 * This implements a per-cpu cache of free, zero'd, ready-to-go pages.
 * The idea is to populate this cache prior to acquiring any locks so
 * we don't wind up potentially zeroing VM pages (under heavy loads) while
 * holding potentially contending locks.
 *
 * Note that we allocate the page uninserted into anything and use a pindex
 * of 0; vm_page_alloc() will effectively add gd_cpuid so these
 * allocations should wind up being uncontended.  However, we still want
 * to rove across PQ_L2_SIZE.
 */
void
vm_page_pcpu_cache(void)
{
#if 0
	globaldata_t gd = mycpu;
	vm_page_t m;

	if (gd->gd_vmpg_count < GD_MINVMPG) {
		crit_enter_gd(gd);
		while (gd->gd_vmpg_count < GD_MAXVMPG) {
			m = vm_page_alloc(NULL, ticks & ~ncpus2_mask,
					  VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
					  VM_ALLOC_ZERO);
			if (gd->gd_vmpg_count < GD_MAXVMPG) {
				if ((m->flags & PG_ZERO) == 0) {
					pmap_zero_page(VM_PAGE_TO_PHYS(m));
					vm_page_flag_set(m, PG_ZERO);
				}
				gd->gd_vmpg_array[gd->gd_vmpg_count++] = m;
			} else {
				vm_page_free(m);
			}
		}
		crit_exit_gd(gd);
	}
#endif
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.  If object is NULL an unassociated page will be allocated.
 *
 * The returned page will be busied and removed from its queues.  This
 * routine may return NULL if a race occurs and the page is found to
 * already exist at the specified (object, pindex).
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_QUICK		like normal but cannot use cache
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page only
 *	VM_ALLOC_FORCE_ZERO	advisory request for pre-zero'd page only
 *	VM_ALLOC_NULL_OK	ok to return NULL on insertion collision
 *				(see vm_page_grab())
 *	VM_ALLOC_USE_GD		ok to use per-gd cache
 *
 * The object must be held if not NULL.
 * This routine may not block.
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
1491  */
1492 vm_page_t
1493 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
1494 {
1495 	globaldata_t gd = mycpu;
1496 	vm_object_t obj;
1497 	vm_page_t m;
1498 	u_short pg_color;
1499 
1500 #if 0
1501 	/*
1502 	 * Special per-cpu free VM page cache.  The pages are pre-busied
1503 	 * and pre-zerod for us.
1504 	 */
1505 	if (gd->gd_vmpg_count && (page_req & VM_ALLOC_USE_GD)) {
1506 		crit_enter_gd(gd);
1507 		if (gd->gd_vmpg_count) {
1508 			m = gd->gd_vmpg_array[--gd->gd_vmpg_count];
1509 			crit_exit_gd(gd);
1510 			goto done;
1511                 }
1512 		crit_exit_gd(gd);
1513         }
1514 #endif
1515 	m = NULL;
1516 
1517 	/*
1518 	 * Cpu twist - cpu localization algorithm
1519 	 */
1520 	if (object) {
1521 		pg_color = gd->gd_cpuid + (pindex & ~ncpus_fit_mask) +
1522 			   (object->pg_color & ~ncpus_fit_mask);
1523 	} else {
1524 		pg_color = gd->gd_cpuid + (pindex & ~ncpus_fit_mask);
1525 	}
1526 	KKASSERT(page_req &
1527 		(VM_ALLOC_NORMAL|VM_ALLOC_QUICK|
1528 		 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
1529 
1530 	/*
	 * Certain system threads (pageout daemon, buf_daemons) are
	 * allowed to eat deeper into the free page list.
	 */
	if (curthread->td_flags & TDF_SYSTHREAD)
		page_req |= VM_ALLOC_SYSTEM;

loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO))
			m = vm_page_select_free(pg_color, TRUE);
		else
			m = vm_page_select_free(pg_color, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			kprintf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(pg_color);
		}
#else
		m = vm_page_select_cache(pg_color);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 *
		 * Only do this if we can safely acquire the vm_object lock,
		 * because this is effectively a random page and the caller
		 * might be holding the lock shared, we don't want to
		 * deadlock.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
				("Found dirty cache page %p", m));
			if ((obj = m->object) != NULL) {
				if (vm_object_hold_try(obj)) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
					/* m->object NULL here */
					vm_object_drop(obj);
				} else {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				}
			} else {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0) {
			kprintf("vm_page_alloc(NORMAL): missing pages on "
				"cache queue: %d\n",
				vmstats.v_cache_count);
		}
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * v_free_count can race so loop if we don't find the expected
	 * page.
	 */
	if (m == NULL)
		goto loop;

	/*
	 * Good page found.  The page has already been busied for us and
	 * removed from its queues.
	 */
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));
	KKASSERT(m->queue == PQ_NONE);

#if 0
done:
#endif
	/*
	 * Initialize the structure, inheriting some flags but clearing
	 * all the rest.  The page has already been busied for us.
	 */
	vm_page_flag_clear(m, ~(PG_ZERO | PG_BUSY | PG_SBUSY));
	KKASSERT(m->wire_count == 0);
	KKASSERT(m->busy == 0);
	m->act_count = 0;
	m->valid = 0;

	/*
	 * Caller must be holding the object lock (asserted by
	 * vm_page_insert()).
	 *
	 * NOTE: Inserting a page here does not insert it into any pmaps
	 *	 (which could cause us to block allocating memory).
	 *
	 * NOTE: If no object is supplied the page is left unassociated and
	 *	 m->pindex can be used by the caller for any purpose.
	 */
	if (object) {
		if (vm_page_insert(m, object, pindex) == FALSE) {
			vm_page_free(m);
			if ((page_req & VM_ALLOC_NULL_OK) == 0)
				panic("PAGE RACE %p[%ld]/%p",
				      object, (long)pindex, m);
			m = NULL;
		}
	} else {
		m->pindex = pindex;
	}

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	pagedaemon_wakeup();

	/*
	 * A PG_BUSY page is returned.
	 */
	return (m);
}
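
/*
 * Typical vm_page_alloc() usage, sketched as hedged pseudocode (not
 * compiled): the object token is held, the page comes back PG_BUSY
 * and off its queues, and the caller wakes it up once initialized.
 * The caller shown is hypothetical.
 */
#if 0
	vm_object_hold(object);
	m = vm_page_alloc(object, pindex,
			  VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_NULL_OK);
	if (m) {
		if ((m->flags & PG_ZERO) == 0)
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(m);		/* release PG_BUSY */
	}
	vm_object_drop(object);
#endif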

/*
 * Attempt to allocate contiguous physical memory with the specified
 * requirements.
 */
vm_page_t
vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		     unsigned long alignment, unsigned long boundary,
		     unsigned long size)
{
	alist_blk_t blk;

	alignment >>= PAGE_SHIFT;
	if (alignment == 0)
		alignment = 1;
	boundary >>= PAGE_SHIFT;
	if (boundary == 0)
		boundary = 1;
	size = (size + PAGE_MASK) >> PAGE_SHIFT;

	spin_lock(&vm_contig_spin);
	blk = alist_alloc(&vm_contig_alist, 0, size);
	if (blk == ALIST_BLOCK_NONE) {
		spin_unlock(&vm_contig_spin);
		if (bootverbose) {
			kprintf("vm_page_alloc_contig: %ldk nospace\n",
				size * (PAGE_SIZE / 1024));
		}
		return(NULL);
	}
	if (high && ((vm_paddr_t)(blk + size) << PAGE_SHIFT) > high) {
		alist_free(&vm_contig_alist, blk, size);
		spin_unlock(&vm_contig_spin);
		if (bootverbose) {
			kprintf("vm_page_alloc_contig: %ldk high "
				"%016jx failed\n",
				size * (PAGE_SIZE / 1024),
				(intmax_t)high);
		}
		return(NULL);
	}
	spin_unlock(&vm_contig_spin);
	if (vm_contig_verbose) {
		kprintf("vm_page_alloc_contig: %016jx/%ldk\n",
			(intmax_t)(vm_paddr_t)blk << PAGE_SHIFT,
			size * (PAGE_SIZE / 1024));
	}
	return (PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT));
}
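
/*
 * Typical contiguous DMA allocation, sketched as hedged pseudocode
 * (not compiled): request 64KB of wired, physically contiguous pages
 * below 4GB.  The size and address limits are hypothetical.
 */
#if 0
	vm_page_t m;

	m = vm_page_alloc_contig(0, 0xFFFFFFFFULL,	/* low, high */
				 PAGE_SIZE, 0,		/* align, boundary */
				 64 * 1024);		/* size in bytes */
	if (m) {
		/* ... hand VM_PAGE_TO_PHYS(m) to the device ... */
		vm_page_free_contig(m, 64 * 1024);
	}
#endif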

/*
 * Free contiguously allocated pages.  The pages will be wired but not busy.
 * When freeing to the alist we leave them wired and not busy.
 */
void
vm_page_free_contig(vm_page_t m, unsigned long size)
{
	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
	vm_pindex_t start = pa >> PAGE_SHIFT;
	vm_pindex_t pages = (size + PAGE_MASK) >> PAGE_SHIFT;

	if (vm_contig_verbose) {
		kprintf("vm_page_free_contig:  %016jx/%ldk\n",
			(intmax_t)pa, size / 1024);
	}
	if (pa < vm_low_phys_reserved) {
		KKASSERT(pa + size <= vm_low_phys_reserved);
		spin_lock(&vm_contig_spin);
		alist_free(&vm_contig_alist, start, pages);
		spin_unlock(&vm_contig_spin);
	} else {
		while (pages) {
			vm_page_busy_wait(m, FALSE, "cpgfr");
			vm_page_unwire(m, 0);
			vm_page_free(m);
			--pages;
			++m;
		}
	}
}

/*
 * Wait for sufficient free memory for nominal heavy memory use kernel
 * operations.
 *
 * WARNING!  Be sure never to call this in any vm_pageout code path, which
 *	     will trivially deadlock the system.
 */
void
vm_wait_nominal(void)
{
	while (vm_page_count_min(0))
		vm_wait(0);
}

/*
 * Test if vm_wait_nominal() would block.
 */
int
vm_test_nominal(void)
{
	if (vm_page_count_min(0))
		return(1);
	return(0);
}

/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 *
 * The caller may loop if vm_page_count_min() == FALSE so we cannot be
 * more generous than that.
1789  */
1790 void
1791 vm_wait(int timo)
1792 {
1793 	/*
1794 	 * never wait forever
1795 	 */
1796 	if (timo == 0)
1797 		timo = hz;
1798 	lwkt_gettoken(&vm_token);
1799 
1800 	if (curthread == pagethread) {
1801 		/*
1802 		 * The pageout daemon itself needs pages, this is bad.
1803 		 */
1804 		if (vm_page_count_min(0)) {
1805 			vm_pageout_pages_needed = 1;
1806 			tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
1807 		}
1808 	} else {
1809 		/*
1810 		 * Wakeup the pageout daemon if necessary and wait.
1811 		 */
1812 		if (vm_page_count_target()) {
1813 			if (vm_pages_needed == 0) {
1814 				vm_pages_needed = 1;
1815 				wakeup(&vm_pages_needed);
1816 			}
1817 			++vm_pages_waiting;	/* SMP race ok */
1818 			tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
1819 		}
1820 	}
1821 	lwkt_reltoken(&vm_token);
1822 }
1823 
1824 /*
1825  * Block until free pages are available for allocation
1826  *
1827  * Called only from vm_fault so that processes page faulting can be
1828  * easily tracked.
1829  */
1830 void
1831 vm_wait_pfault(void)
1832 {
1833 	/*
1834 	 * Wakeup the pageout daemon if necessary and wait.
1835 	 */
1836 	if (vm_page_count_min(0)) {
1837 		lwkt_gettoken(&vm_token);
1838 		while (vm_page_count_severe()) {
1839 			if (vm_page_count_target()) {
1840 				if (vm_pages_needed == 0) {
1841 					vm_pages_needed = 1;
1842 					wakeup(&vm_pages_needed);
1843 				}
1844 				++vm_pages_waiting;	/* SMP race ok */
1845 				tsleep(&vmstats.v_free_count, 0, "pfault", hz);
1846 			}
1847 		}
1848 		lwkt_reltoken(&vm_token);
1849 	}
1850 }
1851 
1852 /*
1853  * Put the specified page on the active list (if appropriate).  Ensure
1854  * that act_count is at least ACT_INIT but do not otherwise mess with it.
1855  *
1856  * The caller should be holding the page busy?  XXX
1857  * This routine may not block.
1858  */
1859 void
1860 vm_page_activate(vm_page_t m)
1861 {
1862 	u_short oqueue;
1863 
1864 	vm_page_spin_lock(m);
1865 	if (m->queue - m->pc != PQ_ACTIVE) {
1866 		_vm_page_queue_spin_lock(m);
1867 		oqueue = _vm_page_rem_queue_spinlocked(m);
1868 		/* page is left spinlocked, queue is unlocked */
1869 
1870 		if (oqueue == PQ_CACHE)
1871 			mycpu->gd_cnt.v_reactivated++;
1872 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1873 			if (m->act_count < ACT_INIT)
1874 				m->act_count = ACT_INIT;
1875 			_vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0);
1876 		}
1877 		_vm_page_and_queue_spin_unlock(m);
1878 		if (oqueue == PQ_CACHE || oqueue == PQ_FREE)
1879 			pagedaemon_wakeup();
1880 	} else {
1881 		if (m->act_count < ACT_INIT)
1882 			m->act_count = ACT_INIT;
1883 		vm_page_spin_unlock(m);
1884 	}
1885 }
1886 
1887 /*
1888  * Helper routine for vm_page_free_toq() and vm_page_cache().  This
1889  * routine is called when a page has been added to the cache or free
1890  * queues.
1891  *
1892  * This routine may not block.
1893  */
1894 static __inline void
1895 vm_page_free_wakeup(void)
1896 {
1897 	/*
1898 	 * If the pageout daemon itself needs pages, then tell it that
1899 	 * there are some free.
1900 	 */
1901 	if (vm_pageout_pages_needed &&
1902 	    vmstats.v_cache_count + vmstats.v_free_count >=
1903 	    vmstats.v_pageout_free_min
1904 	) {
1905 		wakeup(&vm_pageout_pages_needed);
1906 		vm_pageout_pages_needed = 0;
1907 	}
1908 
1909 	/*
1910 	 * Wakeup processes that are waiting on memory.
1911 	 *
1912 	 * NOTE: vm_paging_target() is the pageout daemon's target, while
1913  *	 vm_page_count_target() is somewhere in between.  We want
1914 	 *	 to wake processes up prior to the pageout daemon reaching
1915 	 *	 its target to provide some hysteresis.
1916 	 */
1917 	if (vm_pages_waiting) {
1918 		if (!vm_page_count_target()) {
1919 			/*
1920 			 * Plenty of pages are free, wakeup everyone.
1921 			 */
1922 			vm_pages_waiting = 0;
1923 			wakeup(&vmstats.v_free_count);
1924 			++mycpu->gd_cnt.v_ppwakeups;
1925 		} else if (!vm_page_count_min(0)) {
1926 			/*
1927 			 * Some pages are free, wakeup someone.
1928 			 */
1929 			int wcount = vm_pages_waiting;
1930 			if (wcount > 0)
1931 				--wcount;
1932 			vm_pages_waiting = wcount;
1933 			wakeup_one(&vmstats.v_free_count);
1934 			++mycpu->gd_cnt.v_ppwakeups;
1935 		}
1936 	}
1937 }
1938 
1939 /*
1940  * Returns the given page to the PQ_FREE or PQ_HOLD list and disassociates
1941  * it from its VM object.
1942  *
1943  * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
1944  * return (the page will have been freed).
1945  */
1946 void
1947 vm_page_free_toq(vm_page_t m)
1948 {
1949 	mycpu->gd_cnt.v_tfree++;
1950 	KKASSERT((m->flags & PG_MAPPED) == 0);
1951 	KKASSERT(m->flags & PG_BUSY);
1952 
1953 	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
1954 		kprintf("vm_page_free: pindex(%lu), busy(%d), "
1955 			"PG_BUSY(%d), hold(%d)\n",
1956 			(u_long)m->pindex, m->busy,
1957 			((m->flags & PG_BUSY) ? 1 : 0), m->hold_count);
1958 		if ((m->queue - m->pc) == PQ_FREE)
1959 			panic("vm_page_free: freeing free page");
1960 		else
1961 			panic("vm_page_free: freeing busy page");
1962 	}
1963 
1964 	/*
1965 	 * Remove from object, spinlock the page and its queues and
1966 	 * remove from any queue.  No queue spinlock will be held
1967 	 * after this section (because the page was removed from any
1968 	 * queue).
1969 	 */
1970 	vm_page_remove(m);
1971 	vm_page_and_queue_spin_lock(m);
1972 	_vm_page_rem_queue_spinlocked(m);
1973 
1974 	/*
1975 	 * No further management of fictitious pages occurs beyond object
1976 	 * and queue removal.
1977 	 */
1978 	if ((m->flags & PG_FICTITIOUS) != 0) {
1979 		vm_page_spin_unlock(m);
1980 		vm_page_wakeup(m);
1981 		return;
1982 	}
1983 
1984 	m->valid = 0;
1985 	vm_page_undirty(m);
1986 
1987 	if (m->wire_count != 0) {
1988 		if (m->wire_count > 1) {
1989 			panic("vm_page_free: invalid wire count (%d), "
1990 			      "pindex: 0x%lx",
1991 			      m->wire_count, (long)m->pindex);
1992 		}
1993 		panic("vm_page_free: freeing wired page");
1994 	}
1995 
1996 	/*
1997 	 * Clear the UNMANAGED flag when freeing an unmanaged page.
1998 	 * Clear the NEED_COMMIT flag
1999 	 */
2000 	if (m->flags & PG_UNMANAGED)
2001 		vm_page_flag_clear(m, PG_UNMANAGED);
2002 	if (m->flags & PG_NEED_COMMIT)
2003 		vm_page_flag_clear(m, PG_NEED_COMMIT);
2004 
2005 	if (m->hold_count != 0) {
2006 		vm_page_flag_clear(m, PG_ZERO);
2007 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
2008 	} else {
2009 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
2010 	}
2011 
2012 	/*
2013 	 * This sequence allows us to clear PG_BUSY while still holding
2014 	 * its spin lock, which reduces contention vs allocators.  We
2015 	 * must not leave the queue locked or _vm_page_wakeup() may
2016 	 * deadlock.
2017 	 */
2018 	_vm_page_queue_spin_unlock(m);
2019 	if (_vm_page_wakeup(m)) {
2020 		vm_page_spin_unlock(m);
2021 		wakeup(m);
2022 	} else {
2023 		vm_page_spin_unlock(m);
2024 	}
2025 	vm_page_free_wakeup();
2026 }
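/*
 * Illustrative sketch (not part of this file): the usual way a page
 * reaches here is via vm_page_free(), assumed to be the inline wrapper
 * around vm_page_free_toq().  The page must be busied first, mirroring
 * the loop in vm_page_free_contig() above.
 *
 *	vm_page_busy_wait(m, FALSE, "pgfre");
 *	vm_page_free(m);	... releases PG_BUSY as part of the free ...
 */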
2027 
2028 /*
2029  * vm_page_free_fromq_fast()
2030  *
2031  * Remove a non-zero'd page (one without PG_ZERO set) from one of the free
2032  * queues; the page is being removed for zeroing, so do not issue a wakeup.
2033  */
2034 vm_page_t
2035 vm_page_free_fromq_fast(void)
2036 {
2037 	static int qi;
2038 	vm_page_t m;
2039 	int i;
2040 
2041 	for (i = 0; i < PQ_L2_SIZE; ++i) {
2042 		m = vm_page_list_find(PQ_FREE, qi, FALSE);
2043 		/* page is returned spinlocked and removed from its queue */
2044 		if (m) {
2045 			if (vm_page_busy_try(m, TRUE)) {
2046 				/*
2047 				 * We were unable to busy the page, deactivate
2048 				 * it and loop.
2049 				 */
2050 				_vm_page_deactivate_locked(m, 0);
2051 				vm_page_spin_unlock(m);
2052 			} else if (m->flags & PG_ZERO) {
2053 				/*
2054 				 * The page is PG_ZERO, requeue it and loop
2055 				 */
2056 				_vm_page_add_queue_spinlocked(m,
2057 							      PQ_FREE + m->pc,
2058 							      0);
2059 				vm_page_queue_spin_unlock(m);
2060 				if (_vm_page_wakeup(m)) {
2061 					vm_page_spin_unlock(m);
2062 					wakeup(m);
2063 				} else {
2064 					vm_page_spin_unlock(m);
2065 				}
2066 			} else {
2067 				/*
2068 				 * The page is not PG_ZERO'd so return it.
2069 				 */
2070 				vm_page_spin_unlock(m);
2071 				KKASSERT((m->flags & (PG_UNMANAGED |
2072 						      PG_NEED_COMMIT)) == 0);
2073 				KKASSERT(m->hold_count == 0);
2074 				KKASSERT(m->wire_count == 0);
2075 				break;
2076 			}
2077 			m = NULL;
2078 		}
2079 		qi = (qi + PQ_PRIME2) & PQ_L2_MASK;
2080 	}
2081 	return (m);
2082 }
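/*
 * Illustrative sketch (not part of this file): a hypothetical consumer in
 * the style of the idle page-zeroing code.  The page is returned busied,
 * unqueued, unwired and unheld, so it can be zero'd, flagged PG_ZERO and
 * handed back through vm_page_free_toq().
 *
 *	vm_page_t m;
 *
 *	if ((m = vm_page_free_fromq_fast()) != NULL) {
 *		pmap_zero_page(VM_PAGE_TO_PHYS(m));
 *		vm_page_flag_set(m, PG_ZERO);
 *		vm_page_free_toq(m);
 *	}
 */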
2083 
2084 /*
2085  * vm_page_unmanage()
2086  *
2087  * Prevent PV management from being done on the page.  The page is
2088  * removed from the paging queues as if it were wired, and as a
2089  * consequence of no longer being managed the pageout daemon will not
2090  * touch it (since there is no way to locate the pte mappings for the
2091  * page).  madvise() calls that mess with the pmap will also no longer
2092  * operate on the page.
2093  *
2094  * Beyond that the page is still reasonably 'normal'.  Freeing the page
2095  * will clear the flag.
2096  *
2097  * This routine is used by OBJT_PHYS objects - objects using unswappable
2098  * physical memory as backing store rather than swap-backed memory and
2099  * will eventually be extended to support 4MB unmanaged physical
2100  * mappings.
2101  *
2102  * Caller must be holding the page busy.
2103  */
2104 void
2105 vm_page_unmanage(vm_page_t m)
2106 {
2107 	KKASSERT(m->flags & PG_BUSY);
2108 	if ((m->flags & PG_UNMANAGED) == 0) {
2109 		if (m->wire_count == 0)
2110 			vm_page_unqueue(m);
2111 	}
2112 	vm_page_flag_set(m, PG_UNMANAGED);
2113 }
2114 
2115 /*
2116  * Mark this page as wired down by yet another map, removing it from
2117  * paging queues as necessary.
2118  *
2119  * Caller must be holding the page busy.
2120  */
2121 void
2122 vm_page_wire(vm_page_t m)
2123 {
2124 	/*
2125 	 * Only bump the wire statistics if the page is not already wired,
2126 	 * and only unqueue the page if it is on some queue (if it is unmanaged
2127 	 * it is already off the queues).  Don't do anything with fictitious
2128 	 * pages because they are always wired.
2129 	 */
2130 	KKASSERT(m->flags & PG_BUSY);
2131 	if ((m->flags & PG_FICTITIOUS) == 0) {
2132 		if (atomic_fetchadd_int(&m->wire_count, 1) == 0) {
2133 			if ((m->flags & PG_UNMANAGED) == 0)
2134 				vm_page_unqueue(m);
2135 			atomic_add_int(&vmstats.v_wire_count, 1);
2136 		}
2137 		KASSERT(m->wire_count != 0,
2138 			("vm_page_wire: wire_count overflow m=%p", m));
2139 	}
2140 }
2141 
2142 /*
2143  * Release one wiring of this page, potentially enabling it to be paged again.
2144  *
2145  * Many pages placed on the inactive queue should actually go
2146  * into the cache, but it is difficult to figure out which.  What
2147  * we do instead, if the inactive target is well met, is to put
2148  * clean pages at the head of the inactive queue instead of the tail.
2149  * This will cause them to be moved to the cache more quickly and
2150  * if not actively re-referenced, freed more quickly.  If we just
2151  * stick these pages at the end of the inactive queue, heavy filesystem
2152  * meta-data accesses can cause an unnecessary paging load on memory bound
2153  * processes.  This optimization causes one-time-use metadata to be
2154  * reused more quickly.
2155  *
2156  * Pages marked PG_NEED_COMMIT are always activated and never placed on
2157  * the inactive queue.  This helps the pageout daemon determine memory
2158  * pressure and act on out-of-memory situations more quickly.
2159  *
2160  * BUT, if we are in a low-memory situation we have no choice but to
2161  * put clean pages on the cache queue.
2162  *
2163  * A number of routines use vm_page_unwire() to guarantee that the page
2164  * will go into either the inactive or active queues, and will NEVER
2165  * be placed in the cache - for example, just after dirtying a page.
2166  * Dirty pages in the cache are not allowed.
2167  *
2168  * The page queues must be locked.
2169  * This routine may not block.
2170  */
2171 void
2172 vm_page_unwire(vm_page_t m, int activate)
2173 {
2174 	KKASSERT(m->flags & PG_BUSY);
2175 	if (m->flags & PG_FICTITIOUS) {
2176 		/* do nothing */
2177 	} else if (m->wire_count <= 0) {
2178 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
2179 	} else {
2180 		if (atomic_fetchadd_int(&m->wire_count, -1) == 1) {
2181 			atomic_add_int(&vmstats.v_wire_count, -1);
2182 			if (m->flags & PG_UNMANAGED) {
2183 				;
2184 			} else if (activate || (m->flags & PG_NEED_COMMIT)) {
2185 				vm_page_spin_lock(m);
2186 				_vm_page_add_queue_spinlocked(m,
2187 							PQ_ACTIVE + m->pc, 0);
2188 				_vm_page_and_queue_spin_unlock(m);
2189 			} else {
2190 				vm_page_spin_lock(m);
2191 				vm_page_flag_clear(m, PG_WINATCFLS);
2192 				_vm_page_add_queue_spinlocked(m,
2193 							PQ_INACTIVE + m->pc, 0);
2194 				++vm_swapcache_inactive_heuristic;
2195 				_vm_page_and_queue_spin_unlock(m);
2196 			}
2197 		}
2198 	}
2199 }
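/*
 * Illustrative sketch (not part of this file): the wire/unwire pairing.
 * Both routines assert PG_BUSY; compare the unwire-then-free sequence in
 * vm_page_free_contig() above.
 *
 *	vm_page_busy_wait(m, FALSE, "pgwire");
 *	vm_page_wire(m);
 *	vm_page_wakeup(m);
 *	... the page cannot be paged out while wired ...
 *	vm_page_busy_wait(m, FALSE, "pgunwr");
 *	vm_page_unwire(m, 0);	... 0 selects the inactive queue ...
 *	vm_page_wakeup(m);
 */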
2200 
2201 /*
2202  * Move the specified page to the inactive queue.  If the page has
2203  * any associated swap, the swap is deallocated.
2204  *
2205  * Normally athead is 0 resulting in LRU operation.  athead is set
2206  * to 1 if we want this page to be 'as if it were placed in the cache',
2207  * except without unmapping it from the process address space.
2208  *
2209  * vm_page's spinlock must be held on entry and will remain held on return.
2210  * This routine may not block.
2211  */
2212 static void
2213 _vm_page_deactivate_locked(vm_page_t m, int athead)
2214 {
2215 	u_short oqueue;
2216 
2217 	/*
2218 	 * Ignore if already inactive.
2219 	 */
2220 	if (m->queue - m->pc == PQ_INACTIVE)
2221 		return;
2222 	_vm_page_queue_spin_lock(m);
2223 	oqueue = _vm_page_rem_queue_spinlocked(m);
2224 
2225 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2226 		if (oqueue == PQ_CACHE)
2227 			mycpu->gd_cnt.v_reactivated++;
2228 		vm_page_flag_clear(m, PG_WINATCFLS);
2229 		_vm_page_add_queue_spinlocked(m, PQ_INACTIVE + m->pc, athead);
2230 		if (athead == 0)
2231 			++vm_swapcache_inactive_heuristic;
2232 	}
2233 	_vm_page_queue_spin_unlock(m);
2234 	/* leaves vm_page spinlocked */
2235 }
2236 
2237 /*
2238  * Attempt to deactivate a page.
2239  *
2240  * No requirements.
2241  */
2242 void
2243 vm_page_deactivate(vm_page_t m)
2244 {
2245 	vm_page_spin_lock(m);
2246 	_vm_page_deactivate_locked(m, 0);
2247 	vm_page_spin_unlock(m);
2248 }
2249 
2250 void
2251 vm_page_deactivate_locked(vm_page_t m)
2252 {
2253 	_vm_page_deactivate_locked(m, 0);
2254 }
2255 
2256 /*
2257  * Attempt to move a page to PQ_CACHE.
2258  *
2259  * Returns 0 on failure, 1 on success
2260  *
2261  * The page should NOT be busied by the caller.  This function will validate
2262  * whether the page can be safely moved to the cache.
2263  */
2264 int
2265 vm_page_try_to_cache(vm_page_t m)
2266 {
2267 	vm_page_spin_lock(m);
2268 	if (vm_page_busy_try(m, TRUE)) {
2269 		vm_page_spin_unlock(m);
2270 		return(0);
2271 	}
2272 	if (m->dirty || m->hold_count || m->wire_count ||
2273 	    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT))) {
2274 		if (_vm_page_wakeup(m)) {
2275 			vm_page_spin_unlock(m);
2276 			wakeup(m);
2277 		} else {
2278 			vm_page_spin_unlock(m);
2279 		}
2280 		return(0);
2281 	}
2282 	vm_page_spin_unlock(m);
2283 
2284 	/*
2285 	 * Page busied by us and no longer spinlocked.  Dirty pages cannot
2286 	 * be moved to the cache.
2287 	 */
2288 	vm_page_test_dirty(m);
2289 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2290 		vm_page_wakeup(m);
2291 		return(0);
2292 	}
2293 	vm_page_cache(m);
2294 	return(1);
2295 }
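/*
 * Illustrative sketch (not part of this file): opportunistic callers just
 * try and move on; a zero return means the page was busy, dirty, held,
 * wired, unmanaged or needed a commit, and has been left alone.
 *
 *	if (vm_page_try_to_cache(m) == 0) {
 *		... not cacheable right now, leave it to the pagedaemon ...
 *	}
 */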
2296 
2297 /*
2298  * Attempt to free the page.  If we cannot free it, we do nothing.
2299  * 1 is returned on success, 0 on failure.
2300  *
2301  * No requirements.
2302  */
2303 int
2304 vm_page_try_to_free(vm_page_t m)
2305 {
2306 	vm_page_spin_lock(m);
2307 	if (vm_page_busy_try(m, TRUE)) {
2308 		vm_page_spin_unlock(m);
2309 		return(0);
2310 	}
2311 
2312 	/*
2313 	 * The page can be in any state, including already being on the free
2314 	 * queue.  Check to see if it really can be freed.
2315 	 */
2316 	if (m->dirty ||				/* can't free if it is dirty */
2317 	    m->hold_count ||			/* or held (XXX may be wrong) */
2318 	    m->wire_count ||			/* or wired */
2319 	    (m->flags & (PG_UNMANAGED |		/* or unmanaged */
2320 			 PG_NEED_COMMIT)) ||	/* or needs a commit */
2321 	    m->queue - m->pc == PQ_FREE ||	/* already on PQ_FREE */
2322 	    m->queue - m->pc == PQ_HOLD) {	/* already on PQ_HOLD */
2323 		if (_vm_page_wakeup(m)) {
2324 			vm_page_spin_unlock(m);
2325 			wakeup(m);
2326 		} else {
2327 			vm_page_spin_unlock(m);
2328 		}
2329 		return(0);
2330 	}
2331 	vm_page_spin_unlock(m);
2332 
2333 	/*
2334 	 * We can probably free the page.
2335 	 *
2336 	 * Page busied by us and no longer spinlocked.  Dirty pages will
2337 	 * not be freed by this function.    We have to re-test the
2338 	 * dirty bit after cleaning out the pmaps.
2339 	 */
2340 	vm_page_test_dirty(m);
2341 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2342 		vm_page_wakeup(m);
2343 		return(0);
2344 	}
2345 	vm_page_protect(m, VM_PROT_NONE);
2346 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2347 		vm_page_wakeup(m);
2348 		return(0);
2349 	}
2350 	vm_page_free(m);
2351 	return(1);
2352 }
2353 
2354 /*
2355  * vm_page_cache
2356  *
2357  * Put the specified page onto the page cache queue (if appropriate).
2358  *
2359  * The page must be busy, and this routine will release the busy and
2360  * possibly even free the page.
2361  */
2362 void
2363 vm_page_cache(vm_page_t m)
2364 {
2365 	if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
2366 	    m->busy || m->wire_count || m->hold_count) {
2367 		kprintf("vm_page_cache: attempting to cache busy/held page\n");
2368 		vm_page_wakeup(m);
2369 		return;
2370 	}
2371 
2372 	/*
2373 	 * Already in the cache (and thus not mapped)
2374 	 */
2375 	if ((m->queue - m->pc) == PQ_CACHE) {
2376 		KKASSERT((m->flags & PG_MAPPED) == 0);
2377 		vm_page_wakeup(m);
2378 		return;
2379 	}
2380 
2381 	/*
2382 	 * Caller is required to test m->dirty, but note that the act of
2383 	 * removing the page from its maps can cause it to become dirty
2384 	 * on an SMP system due to another cpu running in usermode.
2385 	 */
2386 	if (m->dirty) {
2387 		panic("vm_page_cache: caching a dirty page, pindex: %ld",
2388 			(long)m->pindex);
2389 	}
2390 
2391 	/*
2392 	 * Remove all pmaps and indicate that the page is not
2393 	 * writeable or mapped.  Our vm_page_protect() call may
2394 	 * have blocked (especially w/ VM_PROT_NONE), so recheck
2395 	 * everything.
2396 	 */
2397 	vm_page_protect(m, VM_PROT_NONE);
2398 	if ((m->flags & (PG_UNMANAGED | PG_MAPPED)) ||
2399 	    m->busy || m->wire_count || m->hold_count) {
2400 		vm_page_wakeup(m);
2401 	} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2402 		vm_page_deactivate(m);
2403 		vm_page_wakeup(m);
2404 	} else {
2405 		_vm_page_and_queue_spin_lock(m);
2406 		_vm_page_rem_queue_spinlocked(m);
2407 		_vm_page_add_queue_spinlocked(m, PQ_CACHE + m->pc, 0);
2408 		_vm_page_queue_spin_unlock(m);
2409 		if (_vm_page_wakeup(m)) {
2410 			vm_page_spin_unlock(m);
2411 			wakeup(m);
2412 		} else {
2413 			vm_page_spin_unlock(m);
2414 		}
2415 		vm_page_free_wakeup();
2416 	}
2417 }
2418 
2419 /*
2420  * vm_page_dontneed()
2421  *
2422  * Cache, deactivate, or do nothing as appropriate.  This routine
2423  * is typically used by madvise() MADV_DONTNEED.
2424  *
2425  * Generally speaking we want to move the page into the cache so
2426  * it gets reused quickly.  However, this can result in a silly syndrome
2427  * due to the page recycling too quickly.  Small objects will not be
2428  * fully cached.  On the other hand, if we move the page to the inactive
2429  * queue we wind up with a problem whereby very large objects
2430  * unnecessarily blow away our inactive and cache queues.
2431  *
2432  * The solution is to move the pages based on a fixed weighting.  We
2433  * either leave them alone, deactivate them, or move them to the cache,
2434  * where moving them to the cache has the highest weighting.
2435  * By forcing some pages into other queues we eventually force the
2436  * system to balance the queues, potentially recovering other unrelated
2437  * space from active.  The idea is to not force this to happen too
2438  * often.
2439  *
2440  * The page must be busied.
2441  */
2442 void
2443 vm_page_dontneed(vm_page_t m)
2444 {
2445 	static int dnweight;
2446 	int dnw;
2447 	int head;
2448 
2449 	dnw = ++dnweight;
2450 
2451 	/*
2452 	 * occasionally leave the page alone
2453 	 */
2454 	if ((dnw & 0x01F0) == 0 ||
2455 	    m->queue - m->pc == PQ_INACTIVE ||
2456 	    m->queue - m->pc == PQ_CACHE
2457 	) {
2458 		if (m->act_count >= ACT_INIT)
2459 			--m->act_count;
2460 		return;
2461 	}
2462 
2463 	/*
2464 	 * If vm_page_dontneed() is inactivating a page, it must clear
2465 	 * the referenced flag; otherwise the pagedaemon will see references
2466 	 * on the page in the inactive queue and reactivate it. Until the
2467 	 * page can move to the cache queue, madvise's job is not done.
2468 	 */
2469 	vm_page_flag_clear(m, PG_REFERENCED);
2470 	pmap_clear_reference(m);
2471 
2472 	if (m->dirty == 0)
2473 		vm_page_test_dirty(m);
2474 
2475 	if (m->dirty || (dnw & 0x0070) == 0) {
2476 		/*
2477 		 * Deactivate the page 3 times out of 32.
2478 		 */
2479 		head = 0;
2480 	} else {
2481 		/*
2482 		 * Cache the page 28 times out of every 32.  Note that
2483 		 * the page is deactivated instead of cached, but placed
2484 		 * at the head of the queue instead of the tail.
2485 		 */
2486 		head = 1;
2487 	}
2488 	vm_page_spin_lock(m);
2489 	_vm_page_deactivate_locked(m, head);
2490 	vm_page_spin_unlock(m);
2491 }
2492 
2493 /*
2494  * These routines manipulate the 'soft busy' count for a page.  A soft busy
2495  * is almost like PG_BUSY except that it allows certain compatible operations
2496  * to occur on the page while it is busy.  For example, a page undergoing a
2497  * write can still be mapped read-only.
2498  *
2499  * Because vm_pages can overlap buffers, m->busy can be > 1.  m->busy is only
2500  * adjusted while the vm_page is PG_BUSY so the flash will occur when the
2501  * busy bit is cleared.
2502  */
2503 void
2504 vm_page_io_start(vm_page_t m)
2505 {
2506 	KASSERT(m->flags & PG_BUSY, ("vm_page_io_start: page not busy!!!"));
2507 	atomic_add_char(&m->busy, 1);
2508 	vm_page_flag_set(m, PG_SBUSY);
2509 }
2510 
2511 void
2512 vm_page_io_finish(vm_page_t m)
2513 {
2514 	KASSERT(m->flags & PG_BUSY, ("vm_page_io_finish: page not busy!!!"));
2515 	atomic_subtract_char(&m->busy, 1);
2516 	if (m->busy == 0)
2517 		vm_page_flag_clear(m, PG_SBUSY);
2518 }
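/*
 * Illustrative sketch (not part of this file): the soft-busy protocol
 * around an I/O.  Both adjustments are made while PG_BUSY is held, but
 * the hard busy can be dropped in between so compatible operations (such
 * as read-only mappings) may proceed while the write is in flight.
 *
 *	vm_page_busy_wait(m, FALSE, "iostr");
 *	vm_page_io_start(m);
 *	vm_page_wakeup(m);
 *	... issue and await the I/O ...
 *	vm_page_busy_wait(m, FALSE, "iofin");
 *	vm_page_io_finish(m);
 *	vm_page_wakeup(m);
 */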
2519 
2520 /*
2521  * Indicate that a clean VM page requires a filesystem commit and cannot
2522  * be reused.  Used by tmpfs.
2523  */
2524 void
2525 vm_page_need_commit(vm_page_t m)
2526 {
2527 	vm_page_flag_set(m, PG_NEED_COMMIT);
2528 	vm_object_set_writeable_dirty(m->object);
2529 }
2530 
2531 void
2532 vm_page_clear_commit(vm_page_t m)
2533 {
2534 	vm_page_flag_clear(m, PG_NEED_COMMIT);
2535 }
2536 
2537 /*
2538  * Grab a page, blocking if it is busy and allocating a page if necessary.
2539  * A busy page is returned or NULL.  The page may or may not be valid and
2540  * might not be on a queue (the caller is responsible for the disposition of
2541  * the page).
2542  *
2543  * If VM_ALLOC_ZERO is specified and the grab must allocate a new page, the
2544  * page will be zero'd and marked valid.
2545  *
2546  * If VM_ALLOC_FORCE_ZERO is specified the page will be zero'd and marked
2547  * valid even if it already exists.
2548  *
2549  * If VM_ALLOC_RETRY is specified this routine will never return NULL.  Also
2550  * note that VM_ALLOC_NORMAL must be specified if VM_ALLOC_RETRY is specified.
2551  * VM_ALLOC_NULL_OK is implied when VM_ALLOC_RETRY is specified.
2552  *
2553  * This routine may block.  If VM_ALLOC_RETRY is not set, NULL is always
2554  * returned when the routine had to block.
2555  *
2556  * This routine may not be called from an interrupt.
2557  *
2558  * PG_ZERO is *ALWAYS* cleared by this routine.
2559  *
2560  * No other requirements.
2561  */
2562 vm_page_t
2563 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2564 {
2565 	vm_page_t m;
2566 	int error;
2567 
2568 	KKASSERT(allocflags &
2569 		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
2570 	vm_object_hold(object);
2571 	for (;;) {
2572 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2573 		if (error) {
2574 			vm_page_sleep_busy(m, TRUE, "pgrbwt");
2575 			if ((allocflags & VM_ALLOC_RETRY) == 0) {
2576 				m = NULL;
2577 				break;
2578 			}
2579 			/* retry */
2580 		} else if (m == NULL) {
2581 			if (allocflags & VM_ALLOC_RETRY)
2582 				allocflags |= VM_ALLOC_NULL_OK;
2583 			m = vm_page_alloc(object, pindex,
2584 					  allocflags & ~VM_ALLOC_RETRY);
2585 			if (m)
2586 				break;
2587 			vm_wait(0);
2588 			if ((allocflags & VM_ALLOC_RETRY) == 0)
2589 				goto failed;
2590 		} else {
2591 			/* m found */
2592 			break;
2593 		}
2594 	}
2595 
2596 	/*
2597 	 * If VM_ALLOC_ZERO an invalid page will be zero'd and set valid.
2598 	 *
2599 	 * If VM_ALLOC_FORCE_ZERO the page is unconditionally zero'd and set
2600 	 * valid even if already valid.
2601 	 */
2602 	if (m->valid == 0) {
2603 		if (allocflags & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO)) {
2604 			if ((m->flags & PG_ZERO) == 0)
2605 				pmap_zero_page(VM_PAGE_TO_PHYS(m));
2606 			m->valid = VM_PAGE_BITS_ALL;
2607 		}
2608 	} else if (allocflags & VM_ALLOC_FORCE_ZERO) {
2609 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
2610 		m->valid = VM_PAGE_BITS_ALL;
2611 	}
2612 	vm_page_flag_clear(m, PG_ZERO);
2613 failed:
2614 	vm_object_drop(object);
2615 	return(m);
2616 }
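/*
 * Illustrative sketch (not part of this file): the common grab idiom.
 * With VM_ALLOC_RETRY (which requires VM_ALLOC_NORMAL) the call cannot
 * return NULL and the page comes back busied; 'object' and 'pindex' are
 * hypothetical.
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	... if a page had to be allocated it is zero'd and fully valid ...
 *	vm_page_wakeup(m);
 */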
2617 
2618 /*
2619  * Mapping function for valid bits or for dirty bits in
2620  * a page.  May not block.
2621  *
2622  * Inputs are required to range within a page.
2623  *
2624  * No requirements.
2625  * Non blocking.
2626  */
2627 int
2628 vm_page_bits(int base, int size)
2629 {
2630 	int first_bit;
2631 	int last_bit;
2632 
2633 	KASSERT(
2634 	    base + size <= PAGE_SIZE,
2635 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2636 	);
2637 
2638 	if (size == 0)		/* handle degenerate case */
2639 		return(0);
2640 
2641 	first_bit = base >> DEV_BSHIFT;
2642 	last_bit = (base + size - 1) >> DEV_BSHIFT;
2643 
2644 	return ((2 << last_bit) - (1 << first_bit));
2645 }
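/*
 * Worked example (illustrative, assuming DEV_BSIZE = 512, DEV_BSHIFT = 9):
 * vm_page_bits(512, 1024) covers the second and third 512-byte chunks.
 *
 *	first_bit = 512 >> 9               = 1
 *	last_bit  = (512 + 1024 - 1) >> 9  = 2
 *	result    = (2 << 2) - (1 << 1)    = 8 - 2 = 0x06 (bits 1 and 2 set)
 */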
2646 
2647 /*
2648  * Sets portions of a page valid and clean.  The arguments are expected
2649  * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2650  * of any partial chunks touched by the range.  The invalid portion of
2651  * such chunks will be zero'd.
2652  *
2653  * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
2654  *	 align base to DEV_BSIZE so as not to mark clean a partially
2655  *	 truncated device block.  Otherwise the dirty page status might be
2656  *	 lost.
2657  *
2658  * This routine may not block.
2659  *
2660  * (base + size) must be less than or equal to PAGE_SIZE.
2661  */
2662 static void
2663 _vm_page_zero_valid(vm_page_t m, int base, int size)
2664 {
2665 	int frag;
2666 	int endoff;
2667 
2668 	if (size == 0)	/* handle degenerate case */
2669 		return;
2670 
2671 	/*
2672 	 * If the base is not DEV_BSIZE aligned and the valid
2673 	 * bit is clear, we have to zero out a portion of the
2674 	 * first block.
2675 	 */
2676 
2677 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2678 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
2679 	) {
2680 		pmap_zero_page_area(
2681 		    VM_PAGE_TO_PHYS(m),
2682 		    frag,
2683 		    base - frag
2684 		);
2685 	}
2686 
2687 	/*
2688 	 * If the ending offset is not DEV_BSIZE aligned and the
2689 	 * valid bit is clear, we have to zero out a portion of
2690 	 * the last block.
2691 	 */
2692 
2693 	endoff = base + size;
2694 
2695 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2696 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
2697 	) {
2698 		pmap_zero_page_area(
2699 		    VM_PAGE_TO_PHYS(m),
2700 		    endoff,
2701 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
2702 		);
2703 	}
2704 }
2705 
2706 /*
2707  * Set valid, clear dirty bits.  If validating the entire
2708  * page we can safely clear the pmap modify bit.  We also
2709  * use this opportunity to clear the PG_NOSYNC flag.  If a process
2710  * takes a write fault on a MAP_NOSYNC memory area the flag will
2711  * be set again.
2712  *
2713  * We set valid bits inclusive of any overlap, but we can only
2714  * clear dirty bits for DEV_BSIZE chunks that are fully within
2715  * the range.
2716  *
2717  * Page must be busied?
2718  * No other requirements.
2719  */
2720 void
2721 vm_page_set_valid(vm_page_t m, int base, int size)
2722 {
2723 	_vm_page_zero_valid(m, base, size);
2724 	m->valid |= vm_page_bits(base, size);
2725 }
2726 
2727 
2728 /*
2729  * Set valid bits and clear dirty bits.
2730  *
2731  * NOTE: This function does not clear the pmap modified bit.
2732  *	 Also note that e.g. NFS may use a byte-granular base
2733  *	 and size.
2734  *
2735  * WARNING: Page must be busied?  But vfs_clean_one_page() will call
2736  *	    this without necessarily busying the page (via bdwrite()).
2737  *	    So for now vm_token must also be held.
2738  *
2739  * No other requirements.
2740  */
2741 void
2742 vm_page_set_validclean(vm_page_t m, int base, int size)
2743 {
2744 	int pagebits;
2745 
2746 	_vm_page_zero_valid(m, base, size);
2747 	pagebits = vm_page_bits(base, size);
2748 	m->valid |= pagebits;
2749 	m->dirty &= ~pagebits;
2750 	if (base == 0 && size == PAGE_SIZE) {
2751 		/*pmap_clear_modify(m);*/
2752 		vm_page_flag_clear(m, PG_NOSYNC);
2753 	}
2754 }
2755 
2756 /*
2757  * Set valid & dirty.  Used by buwrite()
2758  *
2759  * WARNING: Page must be busied?  But vfs_dirty_one_page() will
2760  *	    call this function in buwrite() so for now vm_token must
2761  *	    be held.
2762  *
2763  * No other requirements.
2764  */
2765 void
2766 vm_page_set_validdirty(vm_page_t m, int base, int size)
2767 {
2768 	int pagebits;
2769 
2770 	pagebits = vm_page_bits(base, size);
2771 	m->valid |= pagebits;
2772 	m->dirty |= pagebits;
2773 	if (m->object)
2774 	       vm_object_set_writeable_dirty(m->object);
2775 }
2776 
2777 /*
2778  * Clear dirty bits.
2779  *
2780  * NOTE: This function does not clear the pmap modified bit.
2781  *	 Also note that e.g. NFS may use a byte-granular base
2782  *	 and size.
2783  *
2784  * Page must be busied?
2785  * No other requirements.
2786  */
2787 void
2788 vm_page_clear_dirty(vm_page_t m, int base, int size)
2789 {
2790 	m->dirty &= ~vm_page_bits(base, size);
2791 	if (base == 0 && size == PAGE_SIZE) {
2792 		/*pmap_clear_modify(m);*/
2793 		vm_page_flag_clear(m, PG_NOSYNC);
2794 	}
2795 }
2796 
2797 /*
2798  * Make the page all-dirty.
2799  *
2800  * Also make sure the related object and vnode reflect the fact that the
2801  * object may now contain a dirty page.
2802  *
2803  * Page must be busied?
2804  * No other requirements.
2805  */
2806 void
2807 vm_page_dirty(vm_page_t m)
2808 {
2809 #ifdef INVARIANTS
2810 	int pqtype = m->queue - m->pc;
2811 #endif
2812 	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
2813 		("vm_page_dirty: page in free/cache queue!"));
2814 	if (m->dirty != VM_PAGE_BITS_ALL) {
2815 		m->dirty = VM_PAGE_BITS_ALL;
2816 		if (m->object)
2817 			vm_object_set_writeable_dirty(m->object);
2818 	}
2819 }
2820 
2821 /*
2822  * Invalidates DEV_BSIZE'd chunks within a page.  Both the
2823  * valid and dirty bits for the affected areas are cleared.
2824  *
2825  * Page must be busied?
2826  * Does not block.
2827  * No other requirements.
2828  */
2829 void
2830 vm_page_set_invalid(vm_page_t m, int base, int size)
2831 {
2832 	int bits;
2833 
2834 	bits = vm_page_bits(base, size);
2835 	m->valid &= ~bits;
2836 	m->dirty &= ~bits;
2837 	m->object->generation++;
2838 }
2839 
2840 /*
2841  * The kernel assumes that the invalid portions of a page contain
2842  * garbage, but such pages can be mapped into memory by user code.
2843  * When this occurs, we must zero out the non-valid portions of the
2844  * page so user code sees what it expects.
2845  *
2846  * Pages are most often semi-valid when the end of a file is mapped
2847  * into memory and the file's size is not page aligned.
2848  *
2849  * Page must be busied?
2850  * No other requirements.
2851  */
2852 void
2853 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2854 {
2855 	int b;
2856 	int i;
2857 
2858 	/*
2859 	 * Scan the valid bits looking for invalid sections that
2860 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
2861 	 * valid bit may be set) have already been zeroed by
2862 	 * vm_page_set_validclean().
2863 	 */
2864 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2865 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2866 		    (m->valid & (1 << i))
2867 		) {
2868 			if (i > b) {
2869 				pmap_zero_page_area(
2870 				    VM_PAGE_TO_PHYS(m),
2871 				    b << DEV_BSHIFT,
2872 				    (i - b) << DEV_BSHIFT
2873 				);
2874 			}
2875 			b = i + 1;
2876 		}
2877 	}
2878 
2879 	/*
2880 	 * setvalid is TRUE when we can safely set the zero'd areas
2881 	 * as being valid.  We can do this if there are no cache consistency
2882 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
2883 	 */
2884 	if (setvalid)
2885 		m->valid = VM_PAGE_BITS_ALL;
2886 }
2887 
2888 /*
2889  * Is a (partial) page valid?  Note that the case where size == 0
2890  * will return FALSE in the degenerate case where the page is entirely
2891  * invalid, and TRUE otherwise.
2892  *
2893  * Does not block.
2894  * No other requirements.
2895  */
2896 int
2897 vm_page_is_valid(vm_page_t m, int base, int size)
2898 {
2899 	int bits = vm_page_bits(base, size);
2900 
2901 	if (m->valid && ((m->valid & bits) == bits))
2902 		return 1;
2903 	else
2904 		return 0;
2905 }
2906 
2907 /*
2908  * update dirty bits from pmap/mmu.  May not block.
2909  *
2910  * Caller must hold the page busy
2911  */
2912 void
2913 vm_page_test_dirty(vm_page_t m)
2914 {
2915 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
2916 		vm_page_dirty(m);
2917 	}
2918 }
2919 
2920 /*
2921  * Register an action, associating it with its vm_page
2922  */
2923 void
2924 vm_page_register_action(vm_page_action_t action, vm_page_event_t event)
2925 {
2926 	struct vm_page_action_list *list;
2927 	int hv;
2928 
2929 	hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
2930 	list = &action_list[hv];
2931 
2932 	lwkt_gettoken(&vm_token);
2933 	vm_page_flag_set(action->m, PG_ACTIONLIST);
2934 	action->event = event;
2935 	LIST_INSERT_HEAD(list, action, entry);
2936 	lwkt_reltoken(&vm_token);
2937 }
2938 
2939 /*
2940  * Unregister an action, disassociating it from its related vm_page
2941  */
2942 void
2943 vm_page_unregister_action(vm_page_action_t action)
2944 {
2945 	struct vm_page_action_list *list;
2946 	int hv;
2947 
2948 	lwkt_gettoken(&vm_token);
2949 	if (action->event != VMEVENT_NONE) {
2950 		action->event = VMEVENT_NONE;
2951 		LIST_REMOVE(action, entry);
2952 
2953 		hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
2954 		list = &action_list[hv];
2955 		if (LIST_EMPTY(list))
2956 			vm_page_flag_clear(action->m, PG_ACTIONLIST);
2957 	}
2958 	lwkt_reltoken(&vm_token);
2959 }
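/*
 * Illustrative sketch (not part of this file): arming a callback on a
 * page event.  The vm_page_action field names follow the usage in
 * vm_page_event_internal() below; the VMEVENT_COW event and
 * 'my_callback' are assumptions for illustration.
 *
 *	struct vm_page_action act;
 *
 *	act.m = m;
 *	act.func = my_callback;
 *	vm_page_register_action(&act, VMEVENT_COW);
 *	...
 *	vm_page_unregister_action(&act);	... if it has not fired ...
 */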
2960 
2961 /*
2962  * Issue an event on a VM page.  Corresponding action structures are
2963  * removed from the page's list and called.
2964  *
2965  * If the vm_page has no more pending action events we clear its
2966  * PG_ACTIONLIST flag.
2967  */
2968 void
2969 vm_page_event_internal(vm_page_t m, vm_page_event_t event)
2970 {
2971 	struct vm_page_action_list *list;
2972 	struct vm_page_action *scan;
2973 	struct vm_page_action *next;
2974 	int hv;
2975 	int all;
2976 
2977 	hv = (int)((intptr_t)m >> 8) & VMACTION_HMASK;
2978 	list = &action_list[hv];
2979 	all = 1;
2980 
2981 	lwkt_gettoken(&vm_token);
2982 	LIST_FOREACH_MUTABLE(scan, list, entry, next) {
2983 		if (scan->m == m) {
2984 			if (scan->event == event) {
2985 				scan->event = VMEVENT_NONE;
2986 				LIST_REMOVE(scan, entry);
2987 				scan->func(m, scan);
2988 				/* XXX */
2989 			} else {
2990 				all = 0;
2991 			}
2992 		}
2993 	}
2994 	if (all)
2995 		vm_page_flag_clear(m, PG_ACTIONLIST);
2996 	lwkt_reltoken(&vm_token);
2997 }
2998 
2999 #include "opt_ddb.h"
3000 #ifdef DDB
3001 #include <sys/kernel.h>
3002 
3003 #include <ddb/ddb.h>
3004 
3005 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3006 {
3007 	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
3008 	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
3009 	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
3010 	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
3011 	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
3012 	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
3013 	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
3014 	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
3015 	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
3016 	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
3017 }
3018 
3019 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3020 {
3021 	int i;
3022 	db_printf("PQ_FREE:");
3023 	for (i = 0; i < PQ_L2_SIZE; i++) {
3024 		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
3025 	}
3026 	db_printf("\n");
3027 
3028 	db_printf("PQ_CACHE:");
3029 	for (i = 0; i < PQ_L2_SIZE; i++) {
3030 		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
3031 	}
3032 	db_printf("\n");
3033 
3034 	db_printf("PQ_ACTIVE:");
3035 	for (i = 0; i < PQ_L2_SIZE; i++) {
3036 		db_printf(" %d", vm_page_queues[PQ_ACTIVE + i].lcnt);
3037 	}
3038 	db_printf("\n");
3039 
3040 	db_printf("PQ_INACTIVE:");
3041 	for (i = 0; i < PQ_L2_SIZE; i++) {
3042 		db_printf(" %d", vm_page_queues[PQ_INACTIVE + i].lcnt);
3043 	}
3044 	db_printf("\n");
3045 }
3046 #endif /* DDB */
3047