xref: /netbsd-src/sys/uvm/uvm_page.c (revision 80d9064ac03cbb6a4174695f0d5b237c8766d3d0)
1 /*	$NetBSD: uvm_page.c,v 1.186 2014/09/05 05:36:21 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993, The Regents of the University of California.
6  *
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
37  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
38  *
39  *
40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 
64 /*
65  * uvm_page.c: page ops.
66  */
67 
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.186 2014/09/05 05:36:21 matt Exp $");
70 
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_readahead.h"
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sched.h>
78 #include <sys/kernel.h>
79 #include <sys/vnode.h>
80 #include <sys/proc.h>
81 #include <sys/atomic.h>
82 #include <sys/cpu.h>
83 
84 #include <uvm/uvm.h>
85 #include <uvm/uvm_ddb.h>
86 #include <uvm/uvm_pdpolicy.h>
87 
88 /*
89  * global vars... XXXCDC: move to uvm. structure.
90  */
91 
92 /*
93  * physical memory config is stored in vm_physmem.
94  */
95 
96 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
97 int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
98 #define	vm_nphysmem	vm_nphysseg
99 
100 /*
101  * Some supported CPUs in a given architecture don't support all
102  * of the things necessary to do idle page zero'ing efficiently.
103  * We therefore provide a way to enable it from machdep code here.
104  */
105 bool vm_page_zero_enable = false;
106 
107 /*
108  * number of pages per-CPU to reserve for the kernel.
109  */
110 int vm_page_reserve_kernel = 5;
111 
112 /*
113  * physical memory size.
114  */
115 int physmem;
116 
117 /*
118  * local variables
119  */
120 
121 /*
122  * these variables record the values returned by vm_page_bootstrap,
123  * for debugging purposes.  The implementation of uvm_pageboot_alloc
124  * and pmap_startup here also uses them internally.
125  */
126 
127 static vaddr_t      virtual_space_start;
128 static vaddr_t      virtual_space_end;
129 
130 /*
131  * we allocate an initial number of page colors in uvm_page_init(),
132  * and remember them.  We may re-color pages as cache sizes are
133  * discovered during the autoconfiguration phase.  But we can never
134  * free the initial set of buckets, since they are allocated using
135  * uvm_pageboot_alloc().
136  */
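
/*
 * Illustrative note (not normative): with uvmexp.ncolors == 8,
 * uvmexp.colormask is 0x7 and a page's bucket is VM_PGCOLOR_BUCKET(pg),
 * roughly its physical page number masked with the colormask; see
 * uvm_page.h for the exact macro definition.
 */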
137 
138 static size_t recolored_pages_memsize /* = 0 */;
139 
140 #ifdef DEBUG
141 vaddr_t uvm_zerocheckkva;
142 #endif /* DEBUG */
143 
144 /*
145  * local prototypes
146  */
147 
148 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
149 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
150 
151 /*
152  * per-object tree of pages
153  */
154 
155 static signed int
156 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
157 {
158 	const struct vm_page *pg1 = n1;
159 	const struct vm_page *pg2 = n2;
160 	const voff_t a = pg1->offset;
161 	const voff_t b = pg2->offset;
162 
163 	if (a < b)
164 		return -1;
165 	if (a > b)
166 		return 1;
167 	return 0;
168 }
169 
170 static signed int
171 uvm_page_compare_key(void *ctx, const void *n, const void *key)
172 {
173 	const struct vm_page *pg = n;
174 	const voff_t a = pg->offset;
175 	const voff_t b = *(const voff_t *)key;
176 
177 	if (a < b)
178 		return -1;
179 	if (a > b)
180 		return 1;
181 	return 0;
182 }
183 
184 const rb_tree_ops_t uvm_page_tree_ops = {
185 	.rbto_compare_nodes = uvm_page_compare_nodes,
186 	.rbto_compare_key = uvm_page_compare_key,
187 	.rbto_node_offset = offsetof(struct vm_page, rb_node),
188 	.rbto_context = NULL
189 };
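
/*
 * These ops back the per-object rb_tree; for example, uvm_pagelookup()
 * below resolves an offset with rb_tree_find_node(&uobj->rb_tree, &off),
 * which drives uvm_page_compare_key() with a pointer to the offset as
 * the key.
 */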
190 
191 /*
192  * inline functions
193  */
194 
195 /*
196  * uvm_pageinsert: insert a page in the object.
197  *
198  * => caller must lock object
199  * => caller must lock page queues
200  * => caller should have already set pg's object and offset pointers
201  *    and bumped the version counter
202  */
203 
204 static inline void
205 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
206     struct vm_page *where)
207 {
208 
209 	KASSERT(uobj == pg->uobject);
210 	KASSERT(mutex_owned(uobj->vmobjlock));
211 	KASSERT((pg->flags & PG_TABLED) == 0);
212 	KASSERT(where == NULL || (where->flags & PG_TABLED));
213 	KASSERT(where == NULL || (where->uobject == uobj));
214 
215 	if (UVM_OBJ_IS_VNODE(uobj)) {
216 		if (uobj->uo_npages == 0) {
217 			struct vnode *vp = (struct vnode *)uobj;
218 
219 			vholdl(vp);
220 		}
221 		if (UVM_OBJ_IS_VTEXT(uobj)) {
222 			atomic_inc_uint(&uvmexp.execpages);
223 		} else {
224 			atomic_inc_uint(&uvmexp.filepages);
225 		}
226 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
227 		atomic_inc_uint(&uvmexp.anonpages);
228 	}
229 
230 	if (where)
231 		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
232 	else
233 		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
234 	pg->flags |= PG_TABLED;
235 	uobj->uo_npages++;
236 }
237 
238 
239 static inline void
240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
241 {
242 	struct vm_page *ret __diagused;
243 
244 	KASSERT(uobj == pg->uobject);
245 	ret = rb_tree_insert_node(&uobj->rb_tree, pg);
246 	KASSERT(ret == pg);
247 }
248 
249 static inline void
250 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
251 {
252 
253 	KDASSERT(uobj != NULL);
254 	uvm_pageinsert_tree(uobj, pg);
255 	uvm_pageinsert_list(uobj, pg, NULL);
256 }
257 
258 /*
259  * uvm_pageremove: remove page from object.
260  *
261  * => caller must lock object
262  * => caller must lock page queues
263  */
264 
265 static inline void
266 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
267 {
268 
269 	KASSERT(uobj == pg->uobject);
270 	KASSERT(mutex_owned(uobj->vmobjlock));
271 	KASSERT(pg->flags & PG_TABLED);
272 
273 	if (UVM_OBJ_IS_VNODE(uobj)) {
274 		if (uobj->uo_npages == 1) {
275 			struct vnode *vp = (struct vnode *)uobj;
276 
277 			holdrelel(vp);
278 		}
279 		if (UVM_OBJ_IS_VTEXT(uobj)) {
280 			atomic_dec_uint(&uvmexp.execpages);
281 		} else {
282 			atomic_dec_uint(&uvmexp.filepages);
283 		}
284 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
285 		atomic_dec_uint(&uvmexp.anonpages);
286 	}
287 
288 	/* object should be locked */
289 	uobj->uo_npages--;
290 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
291 	pg->flags &= ~PG_TABLED;
292 	pg->uobject = NULL;
293 }
294 
295 static inline void
296 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
297 {
298 
299 	KASSERT(uobj == pg->uobject);
300 	rb_tree_remove_node(&uobj->rb_tree, pg);
301 }
302 
303 static inline void
304 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
305 {
306 
307 	KDASSERT(uobj != NULL);
308 	uvm_pageremove_tree(uobj, pg);
309 	uvm_pageremove_list(uobj, pg);
310 }
311 
312 static void
313 uvm_page_init_buckets(struct pgfreelist *pgfl)
314 {
315 	int color, i;
316 
317 	for (color = 0; color < uvmexp.ncolors; color++) {
318 		for (i = 0; i < PGFL_NQUEUES; i++) {
319 			LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
320 		}
321 	}
322 }
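
/*
 * Each free list therefore ends up with uvmexp.ncolors buckets, and every
 * bucket carries PGFL_NQUEUES queues -- in this file one queue for
 * known-zero pages (PGFL_ZEROS) and one for pages of unknown content
 * (PGFL_UNKNOWN); see the PGFL_NQUEUES != 2 guard in uvm_pagealloc_strat().
 */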
323 
324 /*
325  * uvm_page_init: init the page system.   called from uvm_init().
326  *
327  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
328  */
329 
330 void
331 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
332 {
333 	static struct uvm_cpu boot_cpu;
334 	psize_t freepages, pagecount, bucketcount, n;
335 	struct pgflbucket *bucketarray, *cpuarray;
336 	struct vm_physseg *seg;
337 	struct vm_page *pagearray;
338 	int lcv;
339 	u_int i;
340 	paddr_t paddr;
341 
342 	KASSERT(ncpu <= 1);
343 	CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
344 
345 	/*
346 	 * init the page queues and page queue locks, except the free
347 	 * list; we allocate that later (with the initial vm_page
348 	 * structures).
349 	 */
350 
351 	uvm.cpus[0] = &boot_cpu;
352 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
353 	uvmpdpol_init();
354 	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
355 	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
356 
357 	/*
358 	 * allocate vm_page structures.
359 	 */
360 
361 	/*
362 	 * sanity check:
363 	 * before calling this function the MD code is expected to register
364 	 * some free RAM with the uvm_page_physload() function.   our job
365 	 * now is to allocate vm_page structures for this memory.
366 	 */
367 
368 	if (vm_nphysmem == 0)
369 		panic("uvm_page_bootstrap: no memory pre-allocated");
370 
371 	/*
372 	 * first calculate the number of free pages...
373 	 *
374 	 * note that we use start/end rather than avail_start/avail_end.
375 	 * this allows us to allocate extra vm_page structures in case we
376 	 * want to return some memory to the pool after booting.
377 	 */
378 
379 	freepages = 0;
380 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
381 		seg = VM_PHYSMEM_PTR(lcv);
382 		freepages += (seg->end - seg->start);
383 	}
384 
385 	/*
386 	 * Let MD code initialize the number of colors, or default
387 	 * to 1 color if MD code doesn't care.
388 	 */
389 	if (uvmexp.ncolors == 0)
390 		uvmexp.ncolors = 1;
391 	uvmexp.colormask = uvmexp.ncolors - 1;
392 	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);
393 
394 	/*
395 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
396 	 * use.   for each page of memory we use we need a vm_page structure.
397 	 * thus, the total number of pages we can use is the total size of
398 	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
399 	 * structure.   we add one to freepages as a fudge factor to avoid
400 	 * truncation errors (since we can only allocate in terms of whole
401 	 * pages).
402 	 */
403 
404 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
405 	pagecount = ((freepages + 1) << PAGE_SHIFT) /
406 	    (PAGE_SIZE + sizeof(struct vm_page));
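
	/*
	 * Illustrative example (numbers are not normative): with 4 KiB pages
	 * and a struct vm_page on the order of a hundred bytes, pagecount
	 * ends up a few percent below freepages, that being the fraction of
	 * RAM consumed by the vm_page array itself.
	 */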
407 
408 	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
409 	    sizeof(struct pgflbucket) * 2) + (pagecount *
410 	    sizeof(struct vm_page)));
411 	cpuarray = bucketarray + bucketcount;
412 	pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);
413 
414 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
415 		uvm.page_free[lcv].pgfl_buckets =
416 		    (bucketarray + (lcv * uvmexp.ncolors));
417 		uvm_page_init_buckets(&uvm.page_free[lcv]);
418 		uvm.cpus[0]->page_free[lcv].pgfl_buckets =
419 		    (cpuarray + (lcv * uvmexp.ncolors));
420 		uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
421 	}
422 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
423 
424 	/*
425 	 * init the vm_page structures and put them in the correct place.
426 	 */
427 
428 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
429 		seg = VM_PHYSMEM_PTR(lcv);
430 		n = seg->end - seg->start;
431 
432 		/* set up page array pointers */
433 		seg->pgs = pagearray;
434 		pagearray += n;
435 		pagecount -= n;
436 		seg->lastpg = seg->pgs + n;
437 
438 		/* init and free vm_pages (we've already zeroed them) */
439 		paddr = ctob(seg->start);
440 		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
441 			seg->pgs[i].phys_addr = paddr;
442 #ifdef __HAVE_VM_PAGE_MD
443 			VM_MDPAGE_INIT(&seg->pgs[i]);
444 #endif
445 			if (atop(paddr) >= seg->avail_start &&
446 			    atop(paddr) < seg->avail_end) {
447 				uvmexp.npages++;
448 				/* add page to free pool */
449 				uvm_pagefree(&seg->pgs[i]);
450 			}
451 		}
452 	}
453 
454 	/*
455 	 * pass up the values of virtual_space_start and
456 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
457 	 * layers of the VM.
458 	 */
459 
460 	*kvm_startp = round_page(virtual_space_start);
461 	*kvm_endp = trunc_page(virtual_space_end);
462 #ifdef DEBUG
463 	/*
464 	 * steal kva for uvm_pagezerocheck().
465 	 */
466 	uvm_zerocheckkva = *kvm_startp;
467 	*kvm_startp += PAGE_SIZE;
468 #endif /* DEBUG */
469 
470 	/*
471 	 * init various thresholds.
472 	 */
473 
474 	uvmexp.reserve_pagedaemon = 1;
475 	uvmexp.reserve_kernel = vm_page_reserve_kernel;
476 
477 	/*
478 	 * determine if we should zero pages in the idle loop.
479 	 */
480 
481 	uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;
482 
483 	/*
484 	 * done!
485 	 */
486 
487 	uvm.page_init_done = true;
488 }
489 
490 /*
491  * uvm_setpagesize: set the page size
492  *
493  * => sets page_shift and page_mask from uvmexp.pagesize.
494  */
495 
496 void
497 uvm_setpagesize(void)
498 {
499 
500 	/*
501 	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
502 	 * to be a constant (indicated by being a non-zero value).
503 	 */
504 	if (uvmexp.pagesize == 0) {
505 		if (PAGE_SIZE == 0)
506 			panic("uvm_setpagesize: uvmexp.pagesize not set");
507 		uvmexp.pagesize = PAGE_SIZE;
508 	}
509 	uvmexp.pagemask = uvmexp.pagesize - 1;
510 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
511 		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
512 		    uvmexp.pagesize, uvmexp.pagesize);
513 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
514 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
515 			break;
516 }
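
/*
 * Example: uvm_setpagesize() with uvmexp.pagesize == 4096 yields
 * uvmexp.pagemask == 0xfff and uvmexp.pageshift == 12.
 */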
517 
518 /*
519  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
520  */
521 
522 vaddr_t
523 uvm_pageboot_alloc(vsize_t size)
524 {
525 	static bool initialized = false;
526 	vaddr_t addr;
527 #if !defined(PMAP_STEAL_MEMORY)
528 	vaddr_t vaddr;
529 	paddr_t paddr;
530 #endif
531 
532 	/*
533 	 * on first call to this function, initialize ourselves.
534 	 */
535 	if (initialized == false) {
536 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
537 
538 		/* round it the way we like it */
539 		virtual_space_start = round_page(virtual_space_start);
540 		virtual_space_end = trunc_page(virtual_space_end);
541 
542 		initialized = true;
543 	}
544 
545 	/* round to page size */
546 	size = round_page(size);
547 
548 #if defined(PMAP_STEAL_MEMORY)
549 
550 	/*
551 	 * defer bootstrap allocation to MD code (it may want to allocate
552 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
553 	 * virtual_space_start/virtual_space_end if necessary.
554 	 */
555 
556 	addr = pmap_steal_memory(size, &virtual_space_start,
557 	    &virtual_space_end);
558 
559 	return(addr);
560 
561 #else /* !PMAP_STEAL_MEMORY */
562 
563 	/*
564 	 * allocate virtual memory for this request
565 	 */
566 	if (virtual_space_start == virtual_space_end ||
567 	    (virtual_space_end - virtual_space_start) < size)
568 		panic("uvm_pageboot_alloc: out of virtual space");
569 
570 	addr = virtual_space_start;
571 
572 #ifdef PMAP_GROWKERNEL
573 	/*
574 	 * If the kernel pmap can't map the requested space,
575 	 * then allocate more resources for it.
576 	 */
577 	if (uvm_maxkaddr < (addr + size)) {
578 		uvm_maxkaddr = pmap_growkernel(addr + size);
579 		if (uvm_maxkaddr < (addr + size))
580 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
581 	}
582 #endif
583 
584 	virtual_space_start += size;
585 
586 	/*
587 	 * allocate and mapin physical pages to back new virtual pages
588 	 */
589 
590 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
591 	    vaddr += PAGE_SIZE) {
592 
593 		if (!uvm_page_physget(&paddr))
594 			panic("uvm_pageboot_alloc: out of memory");
595 
596 		/*
597 		 * Note this memory is no longer managed, so using
598 		 * pmap_kenter is safe.
599 		 */
600 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
601 	}
602 	pmap_update(pmap_kernel());
603 	return(addr);
604 #endif	/* PMAP_STEAL_MEMORY */
605 }
606 
607 #if !defined(PMAP_STEAL_MEMORY)
608 /*
609  * uvm_page_physget: "steal" one page from the vm_physmem structure.
610  *
611  * => attempt to allocate it off the end of a segment in which the "avail"
612  *    values match the start/end values.   if we can't do that, then we
613  *    will advance both values (making them equal, and removing some
614  *    vm_page structures from the non-avail area).
615  * => return false if out of memory.
616  */
617 
618 /* subroutine: try to allocate from memory chunks on the specified freelist */
619 static bool uvm_page_physget_freelist(paddr_t *, int);
620 
621 static bool
622 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
623 {
624 	struct vm_physseg *seg;
625 	int lcv, x;
626 
627 	/* pass 1: try allocating from a matching end */
628 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
629 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
630 #else
631 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
632 #endif
633 	{
634 		seg = VM_PHYSMEM_PTR(lcv);
635 
636 		if (uvm.page_init_done == true)
637 			panic("uvm_page_physget: called _after_ bootstrap");
638 
639 		if (seg->free_list != freelist)
640 			continue;
641 
642 		/* try from front */
643 		if (seg->avail_start == seg->start &&
644 		    seg->avail_start < seg->avail_end) {
645 			*paddrp = ctob(seg->avail_start);
646 			seg->avail_start++;
647 			seg->start++;
648 			/* nothing left?   nuke it */
649 			if (seg->avail_start == seg->end) {
650 				if (vm_nphysmem == 1)
651 				    panic("uvm_page_physget: out of memory!");
652 				vm_nphysmem--;
653 				for (x = lcv ; x < vm_nphysmem ; x++)
654 					/* structure copy */
655 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
656 			}
657 			return (true);
658 		}
659 
660 		/* try from rear */
661 		if (seg->avail_end == seg->end &&
662 		    seg->avail_start < seg->avail_end) {
663 			*paddrp = ctob(seg->avail_end - 1);
664 			seg->avail_end--;
665 			seg->end--;
666 			/* nothing left?   nuke it */
667 			if (seg->avail_end == seg->start) {
668 				if (vm_nphysmem == 1)
669 				    panic("uvm_page_physget: out of memory!");
670 				vm_nphysmem--;
671 				for (x = lcv ; x < vm_nphysmem ; x++)
672 					/* structure copy */
673 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
674 			}
675 			return (true);
676 		}
677 	}
678 
679 	/* pass2: forget about matching ends, just allocate something */
680 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
681 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
682 #else
683 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
684 #endif
685 	{
686 		seg = VM_PHYSMEM_PTR(lcv);
687 
688 		/* any room in this bank? */
689 		if (seg->avail_start >= seg->avail_end)
690 			continue;  /* nope */
691 
692 		*paddrp = ctob(seg->avail_start);
693 		seg->avail_start++;
694 		/* truncate! */
695 		seg->start = seg->avail_start;
696 
697 		/* nothing left?   nuke it */
698 		if (seg->avail_start == seg->end) {
699 			if (vm_nphysmem == 1)
700 				panic("uvm_page_physget: out of memory!");
701 			vm_nphysmem--;
702 			for (x = lcv ; x < vm_nphysmem ; x++)
703 				/* structure copy */
704 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
705 		}
706 		return (true);
707 	}
708 
709 	return (false);        /* whoops! */
710 }
711 
712 bool
713 uvm_page_physget(paddr_t *paddrp)
714 {
715 	int i;
716 
717 	/* try in the order of freelist preference */
718 	for (i = 0; i < VM_NFREELIST; i++)
719 		if (uvm_page_physget_freelist(paddrp, i) == true)
720 			return (true);
721 	return (false);
722 }
723 #endif /* PMAP_STEAL_MEMORY */
724 
725 /*
726  * uvm_page_physload: load physical memory into VM system
727  *
728  * => all args are PFs
729  * => all pages in start/end get vm_page structures
730  * => areas marked by avail_start/avail_end get added to the free page pool
731  * => we are limited to VM_PHYSSEG_MAX physical memory segments
732  */
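
/*
 * A typical boot-time call from MD code (hypothetical values, all
 * arguments are page frame numbers):
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */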
733 
734 void
735 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
736     paddr_t avail_end, int free_list)
737 {
738 	int preload, lcv;
739 	psize_t npages;
740 	struct vm_page *pgs;
741 	struct vm_physseg *ps;
742 
743 	if (uvmexp.pagesize == 0)
744 		panic("uvm_page_physload: page size not set!");
745 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
746 		panic("uvm_page_physload: bad free list %d", free_list);
747 	if (start >= end)
748 		panic("uvm_page_physload: start >= end");
749 
750 	/*
751 	 * do we have room?
752 	 */
753 
754 	if (vm_nphysmem == VM_PHYSSEG_MAX) {
755 		printf("uvm_page_physload: unable to load physical memory "
756 		    "segment\n");
757 		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
758 		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
759 		printf("\tincrease VM_PHYSSEG_MAX\n");
760 		return;
761 	}
762 
763 	/*
764 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
765 	 * called yet, so kmem is not available).
766 	 */
767 
768 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
769 		if (VM_PHYSMEM_PTR(lcv)->pgs)
770 			break;
771 	}
772 	preload = (lcv == vm_nphysmem);
773 
774 	/*
775 	 * if VM is already running, attempt to kmem_alloc vm_page structures
776 	 */
777 
778 	if (!preload) {
779 		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
780 	} else {
781 		pgs = NULL;
782 		npages = 0;
783 	}
784 
785 	/*
786 	 * now insert us in the proper place in vm_physmem[]
787 	 */
788 
789 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
790 	/* random: put it at the end (easy!) */
791 	ps = VM_PHYSMEM_PTR(vm_nphysmem);
792 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
793 	{
794 		int x;
795 		/* sort by address for binary search */
796 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
797 			if (start < VM_PHYSMEM_PTR(lcv)->start)
798 				break;
799 		ps = VM_PHYSMEM_PTR(lcv);
800 		/* move back other entries, if necessary ... */
801 		for (x = vm_nphysmem ; x > lcv ; x--)
802 			/* structure copy */
803 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
804 	}
805 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
806 	{
807 		int x;
808 		/* sort by largest segment first */
809 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
810 			if ((end - start) >
811 			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
812 				break;
813 		ps = VM_PHYSMEM_PTR(lcv);
814 		/* move back other entries, if necessary ... */
815 		for (x = vm_nphysmem ; x > lcv ; x--)
816 			/* structure copy */
817 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
818 	}
819 #else
820 	panic("uvm_page_physload: unknown physseg strategy selected!");
821 #endif
822 
823 	ps->start = start;
824 	ps->end = end;
825 	ps->avail_start = avail_start;
826 	ps->avail_end = avail_end;
827 	if (preload) {
828 		ps->pgs = NULL;
829 	} else {
830 		ps->pgs = pgs;
831 		ps->lastpg = pgs + npages;
832 	}
833 	ps->free_list = free_list;
834 	vm_nphysmem++;
835 
836 	if (!preload) {
837 		uvmpdpol_reinit();
838 	}
839 }
840 
841 /*
842  * when VM_PHYSSEG_MAX is 1, we can simplify these functions
843  */
844 
845 #if VM_PHYSSEG_MAX == 1
846 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
847 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
848 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
849 #else
850 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
851 #endif
852 
853 /*
854  * vm_physseg_find: find vm_physseg structure that belongs to a PA
855  */
856 int
857 vm_physseg_find(paddr_t pframe, int *offp)
858 {
859 
860 #if VM_PHYSSEG_MAX == 1
861 	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
862 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
863 	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
864 #else
865 	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
866 #endif
867 }
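
/*
 * Within this file, uvm_phys_to_vm_page(), uvm_pageismanaged() and
 * uvm_page_lookup_freelist() all resolve physical addresses through
 * vm_physseg_find().
 */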
868 
869 #if VM_PHYSSEG_MAX == 1
870 static inline int
871 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
872 {
873 
874 	/* 'contig' case */
875 	if (pframe >= segs[0].start && pframe < segs[0].end) {
876 		if (offp)
877 			*offp = pframe - segs[0].start;
878 		return(0);
879 	}
880 	return(-1);
881 }
882 
883 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
884 
885 static inline int
886 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
887 {
888 	/* binary search for it */
889 	u_int	start, len, guess;
890 
891 	/*
892 	 * if try is too large (thus target is less than try) we reduce
893 	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
894 	 *
895 	 * if the try is too small (thus target is greater than try) then
896 	 * we set the new start to be (try + 1).   this means we need to
897 	 * reduce the length to (round(len/2) - 1).
898 	 *
899 	 * note "adjust" below which takes advantage of the fact that
900 	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
901 	 * for any value of len we may have
902 	 */
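
	/*
	 * For instance, with len == 7: round(7/2) - 1 == 3 == trunc((7-1)/2),
	 * which is why a single "len--" below suffices after moving start
	 * past the probe.
	 */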
903 
904 	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
905 		guess = start + (len / 2);	/* try in the middle */
906 
907 		/* start past our try? */
908 		if (pframe >= segs[guess].start) {
909 			/* was try correct? */
910 			if (pframe < segs[guess].end) {
911 				if (offp)
912 					*offp = pframe - segs[guess].start;
913 				return guess;            /* got it */
914 			}
915 			start = guess + 1;	/* next time, start here */
916 			len--;			/* "adjust" */
917 		} else {
918 			/*
919 			 * pframe before try, just reduce length of
920 			 * region, done in "for" loop
921 			 */
922 		}
923 	}
924 	return(-1);
925 }
926 
927 #else
928 
929 static inline int
930 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
931 {
932 	/* linear search for it */
933 	int	lcv;
934 
935 	for (lcv = 0; lcv < nsegs; lcv++) {
936 		if (pframe >= segs[lcv].start &&
937 		    pframe < segs[lcv].end) {
938 			if (offp)
939 				*offp = pframe - segs[lcv].start;
940 			return(lcv);		   /* got it */
941 		}
942 	}
943 	return(-1);
944 }
945 #endif
946 
947 /*
948  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
949  * back from an I/O mapping (ugh!).   used in some MD code as well.
950  */
951 struct vm_page *
952 uvm_phys_to_vm_page(paddr_t pa)
953 {
954 	paddr_t pf = atop(pa);
955 	int	off;
956 	int	psi;
957 
958 	psi = vm_physseg_find(pf, &off);
959 	if (psi != -1)
960 		return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
961 	return(NULL);
962 }
963 
964 paddr_t
965 uvm_vm_page_to_phys(const struct vm_page *pg)
966 {
967 
968 	return pg->phys_addr;
969 }
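
/*
 * MI code normally reaches these two helpers through the PHYS_TO_VM_PAGE()
 * and VM_PAGE_TO_PHYS() macros rather than calling them directly; see
 * uvm_page.h for the macro definitions.
 */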
970 
971 /*
972  * uvm_page_recolor: Recolor the pages if the new bucket count is
973  * larger than the old one.
974  */
975 
976 void
977 uvm_page_recolor(int newncolors)
978 {
979 	struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
980 	struct pgfreelist gpgfl, pgfl;
981 	struct vm_page *pg;
982 	vsize_t bucketcount;
983 	size_t bucketmemsize, oldbucketmemsize;
984 	int lcv, color, i, ocolors;
985 	struct uvm_cpu *ucpu;
986 
987 	KASSERT(((newncolors - 1) & newncolors) == 0);
988 
989 	if (newncolors <= uvmexp.ncolors)
990 		return;
991 
992 	if (uvm.page_init_done == false) {
993 		uvmexp.ncolors = newncolors;
994 		return;
995 	}
996 
997 	bucketcount = newncolors * VM_NFREELIST;
998 	bucketmemsize = bucketcount * sizeof(struct pgflbucket) * 2;
999 	bucketarray = kmem_alloc(bucketmemsize, KM_SLEEP);
1000 	if (bucketarray == NULL) {
1001 		printf("WARNING: unable to allocate %ld page color buckets\n",
1002 		    (long) bucketcount);
1003 		return;
1004 	}
1005 	cpuarray = bucketarray + bucketcount;
1006 
1007 	mutex_spin_enter(&uvm_fpageqlock);
1008 
1009 	/* Make sure we should still do this. */
1010 	if (newncolors <= uvmexp.ncolors) {
1011 		mutex_spin_exit(&uvm_fpageqlock);
1012 		kmem_free(bucketarray, bucketmemsize);
1013 		return;
1014 	}
1015 
1016 	oldbucketarray = uvm.page_free[0].pgfl_buckets;
1017 	ocolors = uvmexp.ncolors;
1018 
1019 	uvmexp.ncolors = newncolors;
1020 	uvmexp.colormask = uvmexp.ncolors - 1;
1021 
1022 	ucpu = curcpu()->ci_data.cpu_uvm;
1023 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1024 		gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
1025 		pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
1026 		uvm_page_init_buckets(&gpgfl);
1027 		uvm_page_init_buckets(&pgfl);
1028 		for (color = 0; color < ocolors; color++) {
1029 			for (i = 0; i < PGFL_NQUEUES; i++) {
1030 				while ((pg = LIST_FIRST(&uvm.page_free[
1031 				    lcv].pgfl_buckets[color].pgfl_queues[i]))
1032 				    != NULL) {
1033 					LIST_REMOVE(pg, pageq.list); /* global */
1034 					LIST_REMOVE(pg, listq.list); /* cpu */
1035 					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
1036 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1037 					    i], pg, pageq.list);
1038 					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
1039 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1040 					    i], pg, listq.list);
1041 				}
1042 			}
1043 		}
1044 		uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
1045 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1046 	}
1047 
1048 	oldbucketmemsize = recolored_pages_memsize;
1049 
1050 	recolored_pages_memsize = bucketmemsize;
1051 	mutex_spin_exit(&uvm_fpageqlock);
1052 
1053 	if (oldbucketmemsize) {
1054 		kmem_free(oldbucketarray, oldbucketmemsize);
1055 	}
1056 
1057 	/*
1058 	 * this calls uvm_km_alloc() which may want to hold
1059 	 * uvm_fpageqlock.
1060 	 */
1061 	uvm_pager_realloc_emerg();
1062 }
1063 
1064 /*
1065  * uvm_cpu_attach: initialize per-CPU data structures.
1066  */
1067 
1068 void
1069 uvm_cpu_attach(struct cpu_info *ci)
1070 {
1071 	struct pgflbucket *bucketarray;
1072 	struct pgfreelist pgfl;
1073 	struct uvm_cpu *ucpu;
1074 	vsize_t bucketcount;
1075 	int lcv;
1076 
1077 	if (CPU_IS_PRIMARY(ci)) {
1078 		/* Already done in uvm_page_init(). */
1079 		goto attachrnd;
1080 	}
1081 
1082 	/* Add more reserve pages for this CPU. */
1083 	uvmexp.reserve_kernel += vm_page_reserve_kernel;
1084 
1085 	/* Configure this CPU's free lists. */
1086 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
1087 	bucketarray = kmem_alloc(bucketcount * sizeof(struct pgflbucket),
1088 	    KM_SLEEP);
1089 	ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
1090 	uvm.cpus[cpu_index(ci)] = ucpu;
1091 	ci->ci_data.cpu_uvm = ucpu;
1092 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1093 		pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
1094 		uvm_page_init_buckets(&pgfl);
1095 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1096 	}
1097 
1098 attachrnd:
1099 	/*
1100 	 * Attach RNG source for this CPU's VM events
1101 	 */
1102 	rnd_attach_source(&uvm.cpus[cpu_index(ci)]->rs,
1103 	    ci->ci_data.cpu_name, RND_TYPE_VM,
1104 	    RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
1105 	    RND_FLAG_ESTIMATE_VALUE);
1106 
1107 }
1108 
1109 /*
1110  * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
1111  */
1112 
1113 static struct vm_page *
1114 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
1115     int *trycolorp)
1116 {
1117 	struct pgflist *freeq;
1118 	struct vm_page *pg;
1119 	int color, trycolor = *trycolorp;
1120 	struct pgfreelist *gpgfl, *pgfl;
1121 
1122 	KASSERT(mutex_owned(&uvm_fpageqlock));
1123 
1124 	color = trycolor;
1125 	pgfl = &ucpu->page_free[flist];
1126 	gpgfl = &uvm.page_free[flist];
1127 	do {
1128 		/* cpu, try1 */
1129 		if ((pg = LIST_FIRST((freeq =
1130 		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1131 			KASSERT(pg->pqflags & PQ_FREE);
1132 			KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
1133 			KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
1134 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
1135 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1136 		    	uvmexp.cpuhit++;
1137 			goto gotit;
1138 		}
1139 		/* global, try1 */
1140 		if ((pg = LIST_FIRST((freeq =
1141 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1142 			KASSERT(pg->pqflags & PQ_FREE);
1143 			KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
1144 			KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
1145 			KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
1146 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1147 		    	uvmexp.cpumiss++;
1148 			goto gotit;
1149 		}
1150 		/* cpu, try2 */
1151 		if ((pg = LIST_FIRST((freeq =
1152 		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1153 			KASSERT(pg->pqflags & PQ_FREE);
1154 			KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
1155 			KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
1156 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
1157 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1158 		    	uvmexp.cpuhit++;
1159 			goto gotit;
1160 		}
1161 		/* global, try2 */
1162 		if ((pg = LIST_FIRST((freeq =
1163 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1164 			KASSERT(pg->pqflags & PQ_FREE);
1165 			KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
1166 			KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
1167 			KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
1168 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1169 		    	uvmexp.cpumiss++;
1170 			goto gotit;
1171 		}
1172 		color = (color + 1) & uvmexp.colormask;
1173 	} while (color != trycolor);
1174 
1175 	return (NULL);
1176 
1177  gotit:
1178 	LIST_REMOVE(pg, pageq.list);	/* global list */
1179 	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
1180 	uvmexp.free--;
1181 
1182 	/* update zero'd page count */
1183 	if (pg->flags & PG_ZERO)
1184 		uvmexp.zeropages--;
1185 
1186 	if (color == trycolor)
1187 		uvmexp.colorhit++;
1188 	else {
1189 		uvmexp.colormiss++;
1190 		*trycolorp = color;
1191 	}
1192 
1193 	return (pg);
1194 }
1195 
1196 /*
1197  * uvm_pagealloc_strat: allocate vm_page from a particular free list.
1198  *
1199  * => return null if no pages free
1200  * => wake up pagedaemon if number of free pages drops below low water mark
1201  * => if obj != NULL, obj must be locked (to put in obj's tree)
1202  * => if anon != NULL, anon must be locked (to put in anon)
1203  * => only one of obj or anon can be non-null
1204  * => caller must activate/deactivate page if it is not wired.
1205  * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
1206  * => policy decision: it is more important to pull a page off of the
1207  *	appropriate priority free list than it is to get a zero'd or
1208  *	unknown contents page.  This is because we live with the
1209  *	consequences of a bad free list decision for the entire
1210  *	lifetime of the page, e.g. if the page comes from memory that
1211  *	is slower to access.
1212  */
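
/*
 * Most callers go through the uvm_pagealloc() convenience wrapper (see
 * uvm_extern.h), which expands to uvm_pagealloc_strat() with
 * UVM_PGA_STRAT_NORMAL and free_list 0, e.g.:
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 */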
1213 
1214 struct vm_page *
1215 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
1216     int flags, int strat, int free_list)
1217 {
1218 	int lcv, try1, try2, zeroit = 0, color;
1219 	struct uvm_cpu *ucpu;
1220 	struct vm_page *pg;
1221 	lwp_t *l;
1222 
1223 	KASSERT(obj == NULL || anon == NULL);
1224 	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
1225 	KASSERT(off == trunc_page(off));
1226 	KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
1227 	KASSERT(anon == NULL || anon->an_lock == NULL ||
1228 	    mutex_owned(anon->an_lock));
1229 
1230 	mutex_spin_enter(&uvm_fpageqlock);
1231 
1232 	/*
1233 	 * This implements a global round-robin page coloring
1234 	 * algorithm.
1235 	 */
1236 
1237 	ucpu = curcpu()->ci_data.cpu_uvm;
1238 	if (flags & UVM_FLAG_COLORMATCH) {
1239 		color = atop(off) & uvmexp.colormask;
1240 	} else {
1241 		color = ucpu->page_free_nextcolor;
1242 	}
1243 
1244 	/*
1245 	 * check to see if we need to generate some free pages by waking
1246 	 * the pagedaemon.
1247 	 */
1248 
1249 	uvm_kick_pdaemon();
1250 
1251 	/*
1252 	 * fail if any of these conditions is true:
1253 	 * [1]  there really are no free pages, or
1254 	 * [2]  only kernel "reserved" pages remain and
1255 	 *        reserved pages have not been requested.
1256 	 * [3]  only pagedaemon "reserved" pages remain and
1257 	 *        the requestor isn't the pagedaemon.
1258 	 * we make kernel reserve pages available if called by a
1259 	 * kernel thread or a realtime thread.
1260 	 */
1261 	l = curlwp;
1262 	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
1263 		flags |= UVM_PGA_USERESERVE;
1264 	}
1265 	if ((uvmexp.free <= uvmexp.reserve_kernel &&
1266 	    (flags & UVM_PGA_USERESERVE) == 0) ||
1267 	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
1268 	     curlwp != uvm.pagedaemon_lwp))
1269 		goto fail;
1270 
1271 #if PGFL_NQUEUES != 2
1272 #error uvm_pagealloc_strat needs to be updated
1273 #endif
1274 
1275 	/*
1276 	 * If we want a zero'd page, try the ZEROS queue first, otherwise
1277 	 * we try the UNKNOWN queue first.
1278 	 */
1279 	if (flags & UVM_PGA_ZERO) {
1280 		try1 = PGFL_ZEROS;
1281 		try2 = PGFL_UNKNOWN;
1282 	} else {
1283 		try1 = PGFL_UNKNOWN;
1284 		try2 = PGFL_ZEROS;
1285 	}
1286 
1287  again:
1288 	switch (strat) {
1289 	case UVM_PGA_STRAT_NORMAL:
1290 		/* Check freelists: descending priority (ascending id) order */
1291 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1292 			pg = uvm_pagealloc_pgfl(ucpu, lcv,
1293 			    try1, try2, &color);
1294 			if (pg != NULL)
1295 				goto gotit;
1296 		}
1297 
1298 		/* No pages free! */
1299 		goto fail;
1300 
1301 	case UVM_PGA_STRAT_ONLY:
1302 	case UVM_PGA_STRAT_FALLBACK:
1303 		/* Attempt to allocate from the specified free list. */
1304 		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
1305 		pg = uvm_pagealloc_pgfl(ucpu, free_list,
1306 		    try1, try2, &color);
1307 		if (pg != NULL)
1308 			goto gotit;
1309 
1310 		/* Fall back, if possible. */
1311 		if (strat == UVM_PGA_STRAT_FALLBACK) {
1312 			strat = UVM_PGA_STRAT_NORMAL;
1313 			goto again;
1314 		}
1315 
1316 		/* No pages free! */
1317 		goto fail;
1318 
1319 	default:
1320 		panic("uvm_pagealloc_strat: bad strat %d", strat);
1321 		/* NOTREACHED */
1322 	}
1323 
1324  gotit:
1325 	/*
1326 	 * We now know which color we actually allocated from; set
1327 	 * the next color accordingly.
1328 	 */
1329 
1330 	ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;
1331 
1332 	/*
1333 	 * update allocation statistics and remember if we have to
1334 	 * zero the page
1335 	 */
1336 
1337 	if (flags & UVM_PGA_ZERO) {
1338 		if (pg->flags & PG_ZERO) {
1339 			uvmexp.pga_zerohit++;
1340 			zeroit = 0;
1341 		} else {
1342 			uvmexp.pga_zeromiss++;
1343 			zeroit = 1;
1344 		}
1345 		if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1346 			ucpu->page_idle_zero = vm_page_zero_enable;
1347 		}
1348 	}
1349 	KASSERT(pg->pqflags == PQ_FREE);
1350 
1351 	pg->offset = off;
1352 	pg->uobject = obj;
1353 	pg->uanon = anon;
1354 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
1355 	if (anon) {
1356 		anon->an_page = pg;
1357 		pg->pqflags = PQ_ANON;
1358 		atomic_inc_uint(&uvmexp.anonpages);
1359 	} else {
1360 		if (obj) {
1361 			uvm_pageinsert(obj, pg);
1362 		}
1363 		pg->pqflags = 0;
1364 	}
1365 	mutex_spin_exit(&uvm_fpageqlock);
1366 
1367 #if defined(UVM_PAGE_TRKOWN)
1368 	pg->owner_tag = NULL;
1369 #endif
1370 	UVM_PAGE_OWN(pg, "new alloc");
1371 
1372 	if (flags & UVM_PGA_ZERO) {
1373 		/*
1374 		 * A zero'd page is not clean.  If we got a page not already
1375 		 * zero'd, then we have to zero it ourselves.
1376 		 */
1377 		pg->flags &= ~PG_CLEAN;
1378 		if (zeroit)
1379 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1380 	}
1381 
1382 	return(pg);
1383 
1384  fail:
1385 	mutex_spin_exit(&uvm_fpageqlock);
1386 	return (NULL);
1387 }
1388 
1389 /*
1390  * uvm_pagereplace: replace a page with another
1391  *
1392  * => object must be locked
1393  */
1394 
1395 void
1396 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
1397 {
1398 	struct uvm_object *uobj = oldpg->uobject;
1399 
1400 	KASSERT((oldpg->flags & PG_TABLED) != 0);
1401 	KASSERT(uobj != NULL);
1402 	KASSERT((newpg->flags & PG_TABLED) == 0);
1403 	KASSERT(newpg->uobject == NULL);
1404 	KASSERT(mutex_owned(uobj->vmobjlock));
1405 
1406 	newpg->uobject = uobj;
1407 	newpg->offset = oldpg->offset;
1408 
1409 	uvm_pageremove_tree(uobj, oldpg);
1410 	uvm_pageinsert_tree(uobj, newpg);
1411 	uvm_pageinsert_list(uobj, newpg, oldpg);
1412 	uvm_pageremove_list(uobj, oldpg);
1413 }
1414 
1415 /*
1416  * uvm_pagerealloc: reallocate a page from one object to another
1417  *
1418  * => both objects must be locked
1419  */
1420 
1421 void
1422 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1423 {
1424 	/*
1425 	 * remove it from the old object
1426 	 */
1427 
1428 	if (pg->uobject) {
1429 		uvm_pageremove(pg->uobject, pg);
1430 	}
1431 
1432 	/*
1433 	 * put it in the new object
1434 	 */
1435 
1436 	if (newobj) {
1437 		pg->uobject = newobj;
1438 		pg->offset = newoff;
1439 		uvm_pageinsert(newobj, pg);
1440 	}
1441 }
1442 
1443 #ifdef DEBUG
1444 /*
1445  * check if page is zero-filled
1446  *
1447  *  - called with free page queue lock held.
1448  */
1449 void
1450 uvm_pagezerocheck(struct vm_page *pg)
1451 {
1452 	int *p, *ep;
1453 
1454 	KASSERT(uvm_zerocheckkva != 0);
1455 	KASSERT(mutex_owned(&uvm_fpageqlock));
1456 
1457 	/*
1458 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
1459 	 * uvm page allocator.
1460 	 *
1461 	 * it might be better to have "CPU-local temporary map" pmap interface.
1462 	 */
1463 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
1464 	p = (int *)uvm_zerocheckkva;
1465 	ep = (int *)((char *)p + PAGE_SIZE);
1466 	pmap_update(pmap_kernel());
1467 	while (p < ep) {
1468 		if (*p != 0)
1469 			panic("PG_ZERO page isn't zero-filled");
1470 		p++;
1471 	}
1472 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
1473 	/*
1474 	 * pmap_update() is not necessary here because no one except us
1475 	 * uses this VA.
1476 	 */
1477 }
1478 #endif /* DEBUG */
1479 
1480 /*
1481  * uvm_pagefree: free page
1482  *
1483  * => erase page's identity (i.e. remove from object)
1484  * => put page on free list
1485  * => caller must lock owning object (either anon or uvm_object)
1486  * => caller must lock page queues
1487  * => assumes all valid mappings of pg are gone
1488  */
1489 
1490 void
1491 uvm_pagefree(struct vm_page *pg)
1492 {
1493 	struct pgflist *pgfl;
1494 	struct uvm_cpu *ucpu;
1495 	int index, color, queue;
1496 	bool iszero;
1497 
1498 #ifdef DEBUG
1499 	if (pg->uobject == (void *)0xdeadbeef &&
1500 	    pg->uanon == (void *)0xdeadbeef) {
1501 		panic("uvm_pagefree: freeing free page %p", pg);
1502 	}
1503 #endif /* DEBUG */
1504 
1505 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
1506 	KASSERT(!(pg->pqflags & PQ_FREE));
1507 	//KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
1508 	KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
1509 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
1510 		mutex_owned(pg->uanon->an_lock));
1511 
1512 	/*
1513 	 * if the page is loaned, resolve the loan instead of freeing.
1514 	 */
1515 
1516 	if (pg->loan_count) {
1517 		KASSERT(pg->wire_count == 0);
1518 
1519 		/*
1520 		 * if the page is owned by an anon then we just want to
1521 		 * drop anon ownership.  the kernel will free the page when
1522 		 * it is done with it.  if the page is owned by an object,
1523 		 * remove it from the object and mark it dirty for the benefit
1524 		 * of possible anon owners.
1525 		 *
1526 		 * regardless of previous ownership, wakeup any waiters,
1527 		 * unbusy the page, and we're done.
1528 		 */
1529 
1530 		if (pg->uobject != NULL) {
1531 			uvm_pageremove(pg->uobject, pg);
1532 			pg->flags &= ~PG_CLEAN;
1533 		} else if (pg->uanon != NULL) {
1534 			if ((pg->pqflags & PQ_ANON) == 0) {
1535 				pg->loan_count--;
1536 			} else {
1537 				pg->pqflags &= ~PQ_ANON;
1538 				atomic_dec_uint(&uvmexp.anonpages);
1539 			}
1540 			pg->uanon->an_page = NULL;
1541 			pg->uanon = NULL;
1542 		}
1543 		if (pg->flags & PG_WANTED) {
1544 			wakeup(pg);
1545 		}
1546 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
1547 #ifdef UVM_PAGE_TRKOWN
1548 		pg->owner_tag = NULL;
1549 #endif
1550 		if (pg->loan_count) {
1551 			KASSERT(pg->uobject == NULL);
1552 			if (pg->uanon == NULL) {
1553 				KASSERT(mutex_owned(&uvm_pageqlock));
1554 				uvm_pagedequeue(pg);
1555 			}
1556 			return;
1557 		}
1558 	}
1559 
1560 	/*
1561 	 * remove page from its object or anon.
1562 	 */
1563 
1564 	if (pg->uobject != NULL) {
1565 		uvm_pageremove(pg->uobject, pg);
1566 	} else if (pg->uanon != NULL) {
1567 		pg->uanon->an_page = NULL;
1568 		atomic_dec_uint(&uvmexp.anonpages);
1569 	}
1570 
1571 	/*
1572 	 * now remove the page from the queues.
1573 	 */
1574 	if (uvmpdpol_pageisqueued_p(pg)) {
1575 		KASSERT(mutex_owned(&uvm_pageqlock));
1576 		uvm_pagedequeue(pg);
1577 	}
1578 
1579 	/*
1580 	 * if the page was wired, unwire it now.
1581 	 */
1582 
1583 	if (pg->wire_count) {
1584 		pg->wire_count = 0;
1585 		uvmexp.wired--;
1586 	}
1587 
1588 	/*
1589 	 * and put on free queue
1590 	 */
1591 
1592 	iszero = (pg->flags & PG_ZERO);
1593 	index = uvm_page_lookup_freelist(pg);
1594 	color = VM_PGCOLOR_BUCKET(pg);
1595 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
1596 
1597 #ifdef DEBUG
1598 	pg->uobject = (void *)0xdeadbeef;
1599 	pg->uanon = (void *)0xdeadbeef;
1600 #endif
1601 
1602 	mutex_spin_enter(&uvm_fpageqlock);
1603 	pg->pqflags = PQ_FREE;
1604 
1605 #ifdef DEBUG
1606 	if (iszero)
1607 		uvm_pagezerocheck(pg);
1608 #endif /* DEBUG */
1609 
1610 
1611 	/* global list */
1612 	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1613 	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
1614 	uvmexp.free++;
1615 	if (iszero) {
1616 		uvmexp.zeropages++;
1617 	}
1618 
1619 	/* per-cpu list */
1620 	ucpu = curcpu()->ci_data.cpu_uvm;
1621 	pg->offset = (uintptr_t)ucpu;
1622 	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1623 	LIST_INSERT_HEAD(pgfl, pg, listq.list);
1624 	ucpu->pages[queue]++;
1625 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1626 		ucpu->page_idle_zero = vm_page_zero_enable;
1627 	}
1628 
1629 	mutex_spin_exit(&uvm_fpageqlock);
1630 }
1631 
1632 /*
1633  * uvm_page_unbusy: unbusy an array of pages.
1634  *
1635  * => pages must either all belong to the same object, or all belong to anons.
1636  * => if pages are object-owned, object must be locked.
1637  * => if pages are anon-owned, anons must be locked.
1638  * => caller must lock page queues if pages may be released.
1639  * => caller must make sure that anon-owned pages are not PG_RELEASED.
1640  */
1641 
1642 void
1643 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1644 {
1645 	struct vm_page *pg;
1646 	int i;
1647 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
1648 
1649 	for (i = 0; i < npgs; i++) {
1650 		pg = pgs[i];
1651 		if (pg == NULL || pg == PGO_DONTCARE) {
1652 			continue;
1653 		}
1654 
1655 		KASSERT(uvm_page_locked_p(pg));
1656 		KASSERT(pg->flags & PG_BUSY);
1657 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
1658 		if (pg->flags & PG_WANTED) {
1659 			wakeup(pg);
1660 		}
1661 		if (pg->flags & PG_RELEASED) {
1662 			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
1663 			KASSERT(pg->uobject != NULL ||
1664 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
1665 			pg->flags &= ~PG_RELEASED;
1666 			uvm_pagefree(pg);
1667 		} else {
1668 			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
1669 			KASSERT((pg->flags & PG_FAKE) == 0);
1670 			pg->flags &= ~(PG_WANTED|PG_BUSY);
1671 			UVM_PAGE_OWN(pg, NULL);
1672 		}
1673 	}
1674 }
1675 
1676 #if defined(UVM_PAGE_TRKOWN)
1677 /*
1678  * uvm_page_own: set or release page ownership
1679  *
1680  * => this is a debugging function that keeps track of who sets PG_BUSY
1681  *	and where they do it.   it can be used to track down problems
1682  *	such as a process setting "PG_BUSY" and never releasing it.
1683  * => page's object [if any] must be locked
1684  * => if "tag" is NULL then we are releasing page ownership
1685  */
1686 void
1687 uvm_page_own(struct vm_page *pg, const char *tag)
1688 {
1689 
1690 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
1691 	KASSERT((pg->flags & PG_WANTED) == 0);
1692 	KASSERT(uvm_page_locked_p(pg));
1693 
1694 	/* gain ownership? */
1695 	if (tag) {
1696 		KASSERT((pg->flags & PG_BUSY) != 0);
1697 		if (pg->owner_tag) {
1698 			printf("uvm_page_own: page %p already owned "
1699 			    "by proc %d [%s]\n", pg,
1700 			    pg->owner, pg->owner_tag);
1701 			panic("uvm_page_own");
1702 		}
1703 		pg->owner = curproc->p_pid;
1704 		pg->lowner = curlwp->l_lid;
1705 		pg->owner_tag = tag;
1706 		return;
1707 	}
1708 
1709 	/* drop ownership */
1710 	KASSERT((pg->flags & PG_BUSY) == 0);
1711 	if (pg->owner_tag == NULL) {
1712 		printf("uvm_page_own: dropping ownership of a non-owned "
1713 		    "page (%p)\n", pg);
1714 		panic("uvm_page_own");
1715 	}
1716 	if (!uvmpdpol_pageisqueued_p(pg)) {
1717 		KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
1718 		    pg->wire_count > 0);
1719 	} else {
1720 		KASSERT(pg->wire_count == 0);
1721 	}
1722 	pg->owner_tag = NULL;
1723 }
1724 #endif
1725 
1726 /*
1727  * uvm_pageidlezero: zero free pages while the system is idle.
1728  *
1729  * => try to complete one color bucket at a time, to reduce our impact
1730  *	on the CPU cache.
1731  * => we loop until we either reach the target or there is a lwp ready
1732  *      to run, or MD code detects a reason to break early.
1733  */
1734 void
1735 uvm_pageidlezero(void)
1736 {
1737 	struct vm_page *pg;
1738 	struct pgfreelist *pgfl, *gpgfl;
1739 	struct uvm_cpu *ucpu;
1740 	int free_list, firstbucket, nextbucket;
1741 	bool lcont = false;
1742 
1743 	ucpu = curcpu()->ci_data.cpu_uvm;
1744 	if (!ucpu->page_idle_zero ||
1745 	    ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1746 	    	ucpu->page_idle_zero = false;
1747 		return;
1748 	}
1749 	if (!mutex_tryenter(&uvm_fpageqlock)) {
1750 		/* Contention: let other CPUs use the lock. */
1751 		return;
1752 	}
1753 	firstbucket = ucpu->page_free_nextcolor;
1754 	nextbucket = firstbucket;
1755 	do {
1756 		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
1757 			if (sched_curcpu_runnable_p()) {
1758 				goto quit;
1759 			}
1760 			pgfl = &ucpu->page_free[free_list];
1761 			gpgfl = &uvm.page_free[free_list];
1762 			while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
1763 			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
1764 				if (lcont || sched_curcpu_runnable_p()) {
1765 					goto quit;
1766 				}
1767 				LIST_REMOVE(pg, pageq.list); /* global list */
1768 				LIST_REMOVE(pg, listq.list); /* per-cpu list */
1769 				ucpu->pages[PGFL_UNKNOWN]--;
1770 				uvmexp.free--;
1771 				KASSERT(pg->pqflags == PQ_FREE);
1772 				pg->pqflags = 0;
1773 				mutex_spin_exit(&uvm_fpageqlock);
1774 #ifdef PMAP_PAGEIDLEZERO
1775 				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
1776 
1777 					/*
1778 					 * The machine-dependent code detected
1779 					 * some reason for us to abort zeroing
1780 					 * pages, probably because there is a
1781 					 * process now ready to run.
1782 					 */
1783 
1784 					mutex_spin_enter(&uvm_fpageqlock);
1785 					pg->pqflags = PQ_FREE;
1786 					LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1787 					    nextbucket].pgfl_queues[
1788 					    PGFL_UNKNOWN], pg, pageq.list);
1789 					LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1790 					    nextbucket].pgfl_queues[
1791 					    PGFL_UNKNOWN], pg, listq.list);
1792 					ucpu->pages[PGFL_UNKNOWN]++;
1793 					uvmexp.free++;
1794 					uvmexp.zeroaborts++;
1795 					goto quit;
1796 				}
1797 #else
1798 				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1799 #endif /* PMAP_PAGEIDLEZERO */
1800 				pg->flags |= PG_ZERO;
1801 
1802 				if (!mutex_tryenter(&uvm_fpageqlock)) {
1803 					lcont = true;
1804 					mutex_spin_enter(&uvm_fpageqlock);
1805 				} else {
1806 					lcont = false;
1807 				}
1808 				pg->pqflags = PQ_FREE;
1809 				LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1810 				    nextbucket].pgfl_queues[PGFL_ZEROS],
1811 				    pg, pageq.list);
1812 				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1813 				    nextbucket].pgfl_queues[PGFL_ZEROS],
1814 				    pg, listq.list);
1815 				ucpu->pages[PGFL_ZEROS]++;
1816 				uvmexp.free++;
1817 				uvmexp.zeropages++;
1818 			}
1819 		}
1820 		if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1821 			break;
1822 		}
1823 		nextbucket = (nextbucket + 1) & uvmexp.colormask;
1824 	} while (nextbucket != firstbucket);
1825 	ucpu->page_idle_zero = false;
1826  quit:
1827 	mutex_spin_exit(&uvm_fpageqlock);
1828 }
1829 
1830 /*
1831  * uvm_pagelookup: look up a page
1832  *
1833  * => caller should lock object to keep someone from pulling the page
1834  *	out from under it
1835  */
1836 
1837 struct vm_page *
1838 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1839 {
1840 	struct vm_page *pg;
1841 
1842 	KASSERT(mutex_owned(obj->vmobjlock));
1843 
1844 	pg = rb_tree_find_node(&obj->rb_tree, &off);
1845 
1846 	KASSERT(pg == NULL || obj->uo_npages != 0);
1847 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1848 		(pg->flags & PG_BUSY) != 0);
1849 	return pg;
1850 }
1851 
1852 /*
1853  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1854  *
1855  * => caller must lock page queues
1856  */
1857 
1858 void
1859 uvm_pagewire(struct vm_page *pg)
1860 {
1861 	KASSERT(mutex_owned(&uvm_pageqlock));
1862 #if defined(READAHEAD_STATS)
1863 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
1864 		uvm_ra_hit.ev_count++;
1865 		pg->pqflags &= ~PQ_READAHEAD;
1866 	}
1867 #endif /* defined(READAHEAD_STATS) */
1868 	if (pg->wire_count == 0) {
1869 		uvm_pagedequeue(pg);
1870 		uvmexp.wired++;
1871 	}
1872 	pg->wire_count++;
1873 }
1874 
1875 /*
1876  * uvm_pageunwire: unwire the page.
1877  *
1878  * => activate if wire count goes to zero.
1879  * => caller must lock page queues
1880  */
1881 
1882 void
1883 uvm_pageunwire(struct vm_page *pg)
1884 {
1885 	KASSERT(mutex_owned(&uvm_pageqlock));
1886 	pg->wire_count--;
1887 	if (pg->wire_count == 0) {
1888 		uvm_pageactivate(pg);
1889 		uvmexp.wired--;
1890 	}
1891 }
1892 
1893 /*
1894  * uvm_pagedeactivate: deactivate page
1895  *
1896  * => caller must lock page queues
1897  * => caller must check to make sure page is not wired
1898  * => object that page belongs to must be locked (so we can adjust pg->flags)
1899  * => caller must clear the reference on the page before calling
1900  */
1901 
1902 void
1903 uvm_pagedeactivate(struct vm_page *pg)
1904 {
1905 
1906 	KASSERT(mutex_owned(&uvm_pageqlock));
1907 	KASSERT(uvm_page_locked_p(pg));
1908 	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
1909 	uvmpdpol_pagedeactivate(pg);
1910 }
1911 
1912 /*
1913  * uvm_pageactivate: activate page
1914  *
1915  * => caller must lock page queues
1916  */
1917 
1918 void
1919 uvm_pageactivate(struct vm_page *pg)
1920 {
1921 
1922 	KASSERT(mutex_owned(&uvm_pageqlock));
1923 	KASSERT(uvm_page_locked_p(pg));
1924 #if defined(READAHEAD_STATS)
1925 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
1926 		uvm_ra_hit.ev_count++;
1927 		pg->pqflags &= ~PQ_READAHEAD;
1928 	}
1929 #endif /* defined(READAHEAD_STATS) */
1930 	if (pg->wire_count != 0) {
1931 		return;
1932 	}
1933 	uvmpdpol_pageactivate(pg);
1934 }
1935 
1936 /*
1937  * uvm_pagedequeue: remove a page from any paging queue
1938  */
1939 
1940 void
1941 uvm_pagedequeue(struct vm_page *pg)
1942 {
1943 
1944 	if (uvmpdpol_pageisqueued_p(pg)) {
1945 		KASSERT(mutex_owned(&uvm_pageqlock));
1946 	}
1947 
1948 	uvmpdpol_pagedequeue(pg);
1949 }
1950 
1951 /*
1952  * uvm_pageenqueue: add a page to a paging queue without activating.
1953  * used where a page is not really demanded (yet), e.g. read-ahead.
1954  */
1955 
1956 void
1957 uvm_pageenqueue(struct vm_page *pg)
1958 {
1959 
1960 	KASSERT(mutex_owned(&uvm_pageqlock));
1961 	if (pg->wire_count != 0) {
1962 		return;
1963 	}
1964 	uvmpdpol_pageenqueue(pg);
1965 }
1966 
1967 /*
1968  * uvm_pagezero: zero fill a page
1969  *
1970  * => if page is part of an object then the object should be locked
1971  *	to protect pg->flags.
1972  */
1973 
1974 void
1975 uvm_pagezero(struct vm_page *pg)
1976 {
1977 	pg->flags &= ~PG_CLEAN;
1978 	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1979 }
1980 
1981 /*
1982  * uvm_pagecopy: copy a page
1983  *
1984  * => if page is part of an object then the object should be locked
1985  *	to protect pg->flags.
1986  */
1987 
1988 void
1989 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1990 {
1991 
1992 	dst->flags &= ~PG_CLEAN;
1993 	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
1994 }
1995 
1996 /*
1997  * uvm_pageismanaged: test to see whether a page (specified by PA) is managed.
1998  */
1999 
2000 bool
2001 uvm_pageismanaged(paddr_t pa)
2002 {
2003 
2004 	return (vm_physseg_find(atop(pa), NULL) != -1);
2005 }
2006 
2007 /*
2008  * uvm_page_lookup_freelist: look up the free list for the specified page
2009  */
2010 
2011 int
2012 uvm_page_lookup_freelist(struct vm_page *pg)
2013 {
2014 	int lcv;
2015 
2016 	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
2017 	KASSERT(lcv != -1);
2018 	return (VM_PHYSMEM_PTR(lcv)->free_list);
2019 }
2020 
2021 /*
2022  * uvm_page_locked_p: return true if object associated with page is
2023  * locked.  this is a weak check for runtime assertions only.
2024  */
2025 
2026 bool
2027 uvm_page_locked_p(struct vm_page *pg)
2028 {
2029 
2030 	if (pg->uobject != NULL) {
2031 		return mutex_owned(pg->uobject->vmobjlock);
2032 	}
2033 	if (pg->uanon != NULL) {
2034 		return mutex_owned(pg->uanon->an_lock);
2035 	}
2036 	return true;
2037 }
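
/*
 * Note: within this file uvm_page_locked_p() is consulted only from
 * KASSERTs (uvm_page_unbusy(), uvm_page_own(), uvm_pagedeactivate(),
 * uvm_pageactivate()), in keeping with its role as a weak runtime check.
 */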
2038 
2039 #if defined(DDB) || defined(DEBUGPRINT)
2040 
2041 /*
2042  * uvm_page_printit: actually print the page
2043  */
2044 
2045 static const char page_flagbits[] = UVM_PGFLAGBITS;
2046 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
2047 
2048 void
2049 uvm_page_printit(struct vm_page *pg, bool full,
2050     void (*pr)(const char *, ...))
2051 {
2052 	struct vm_page *tpg;
2053 	struct uvm_object *uobj;
2054 	struct pgflist *pgl;
2055 	char pgbuf[128];
2056 	char pqbuf[128];
2057 
2058 	(*pr)("PAGE %p:\n", pg);
2059 	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
2060 	snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
2061 	(*pr)("  flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
2062 	    pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
2063 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
2064 	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
2065 #if defined(UVM_PAGE_TRKOWN)
2066 	if (pg->flags & PG_BUSY)
2067 		(*pr)("  owning process = %d, tag=%s\n",
2068 		    pg->owner, pg->owner_tag);
2069 	else
2070 		(*pr)("  page not busy, no owner\n");
2071 #else
2072 	(*pr)("  [page ownership tracking disabled]\n");
2073 #endif
2074 
2075 	if (!full)
2076 		return;
2077 
2078 	/* cross-verify object/anon */
2079 	if ((pg->pqflags & PQ_FREE) == 0) {
2080 		if (pg->pqflags & PQ_ANON) {
2081 			if (pg->uanon == NULL || pg->uanon->an_page != pg)
2082 			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
2083 				(pg->uanon) ? pg->uanon->an_page : NULL);
2084 			else
2085 				(*pr)("  anon backpointer is OK\n");
2086 		} else {
2087 			uobj = pg->uobject;
2088 			if (uobj) {
2089 				(*pr)("  checking object list\n");
2090 				TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
2091 					if (tpg == pg) {
2092 						break;
2093 					}
2094 				}
2095 				if (tpg)
2096 					(*pr)("  page found on object list\n");
2097 				else
2098 			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
2099 			}
2100 		}
2101 	}
2102 
2103 	/* cross-verify page queue */
2104 	if (pg->pqflags & PQ_FREE) {
2105 		int fl = uvm_page_lookup_freelist(pg);
2106 		int color = VM_PGCOLOR_BUCKET(pg);
2107 		pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
2108 		    ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
2109 	} else {
2110 		pgl = NULL;
2111 	}
2112 
2113 	if (pgl) {
2114 		(*pr)("  checking pageq list\n");
2115 		LIST_FOREACH(tpg, pgl, pageq.list) {
2116 			if (tpg == pg) {
2117 				break;
2118 			}
2119 		}
2120 		if (tpg)
2121 			(*pr)("  page found on pageq list\n");
2122 		else
2123 			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
2124 	}
2125 }
2126 
2127 /*
2128  * uvm_page_printall - print a summary of all managed pages
2129  */
2130 
2131 void
2132 uvm_page_printall(void (*pr)(const char *, ...))
2133 {
2134 	unsigned i;
2135 	struct vm_page *pg;
2136 
2137 	(*pr)("%18s %4s %4s %18s %18s"
2138 #ifdef UVM_PAGE_TRKOWN
2139 	    " OWNER"
2140 #endif
2141 	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
2142 	for (i = 0; i < vm_nphysmem; i++) {
2143 		for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
2144 			(*pr)("%18p %04x %04x %18p %18p",
2145 			    pg, pg->flags, pg->pqflags, pg->uobject,
2146 			    pg->uanon);
2147 #ifdef UVM_PAGE_TRKOWN
2148 			if (pg->flags & PG_BUSY)
2149 				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
2150 #endif
2151 			(*pr)("\n");
2152 		}
2153 	}
2154 }
2155 
2156 #endif /* DDB || DEBUGPRINT */
2157