/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
 * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

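/*
 * A faultstate structure carries the transient state for a single fault
 * resolution.  fs.m, fs.object and fs.pindex track the page currently
 * being resolved as vm_fault_object() iterates the backing-object chain,
 * while fs.first_m, fs.first_object and fs.first_prot record the
 * top-level object the fault started in, which is where any COW copy
 * must ultimately be instantiated.
 */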
struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_prot_t first_prot;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	int hardfault;
	int fault_flags;
	int map_generation;
	int shared;
	boolean_t wired;
	struct vnode *vp;
};

static int debug_cluster = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
int vm_shared_fault = 1;
SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0,
	   "Allow shared token on vm_object");
static long vm_shared_hit = 0;
SYSCTL_LONG(_vm, OID_AUTO, shared_hit, CTLFLAG_RW, &vm_shared_hit, 0,
	   "Successful shared faults");
static long vm_shared_miss = 0;
SYSCTL_LONG(_vm, OID_AUTO, shared_miss, CTLFLAG_RW, &vm_shared_miss, 0,
	   "Unsuccessful shared faults");

static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *,
			vpte_t, int, int);
#if 0
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
#endif
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);
static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, int prot, int fault_flags);

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->m);
	vm_page_wakeup(fs->m);
	fs->m = NULL;
}

/*
 * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
 *	 requires relocking and then checking the timestamp.
 *
 * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
 *	 not have to update fs->map_generation here.
 *
 * NOTE: This function can fail due to a deadlock against the caller's
 *	 holding of a vm_page BUSY.
 */
static __inline int
relock_map(struct faultstate *fs)
{
	int error;

	if (fs->lookup_still_valid == FALSE && fs->map) {
		error = vm_map_lock_read_to(fs->map);
		if (error == 0)
			fs->lookup_still_valid = TRUE;
	} else {
		error = 0;
	}
	return error;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = FALSE;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
	if (fs->object != fs->first_object) {
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->object);
		fs->first_m = NULL;
	}
	fs->object = fs->first_object;
	if (relock && fs->lookup_still_valid == FALSE) {
		if (fs->map)
			vm_map_lock_read(fs->map);
		fs->lookup_still_valid = TRUE;
	}
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	_cleanup_successful_fault(fs, 0);
	if (dealloc) {
		/*vm_object_deallocate(fs->first_object);*/
		/*fs->first_object = NULL; drop used later on */
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->object->type != OBJT_DEFAULT && \
		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
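/*
 * For example, a plain anonymous mapping is backed by an OBJT_DEFAULT
 * object; TRYPAGER() is then false and vm_fault_object() skips pager
 * I/O entirely, falling through to its zero-fill path instead.
 */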

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	int result;
	vm_pindex_t first_pindex;
	struct faultstate fs;
	struct lwp *lp;
	int growstack;
	int retry = 0;

	vm_page_pcpu_cache();
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.vp = NULL;
	growstack = 1;

	if ((lp = curthread->td_lwp) != NULL)
		lp->lwp_flags |= LWP_PAGING;

	lwkt_gettoken(&map->token);

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
	 * VM_FAULT_WRITE if the map entry is a virtual page table and also
	 * writable, so we can set the 'A' (accessed) bit in the virtual
	 * page table entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_object,
			       &first_pindex, &fs.first_prot, &fs.wired);

	/*
	 * If the lookup failed or the map protections are incompatible,
	 * the fault generally fails.  However, if the caller is trying
	 * to do a user wiring we have more work to do.
	 */
	if (result != KERN_SUCCESS) {
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != &kernel_map && curproc != NULL) {
				result = vm_map_growstack(curproc, vaddr);
				if (result == KERN_SUCCESS) {
					growstack = 0;
					++retry;
					goto RetryFault;
				}
				result = KERN_FAILURE;
			}
			goto done;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW read-only sections, because it is NOT
		 * desirable to COW .text.  We simply keep .text from ever
		 * being COW'ed and take the heat that one cannot debug
		 * wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ|VM_PROT_WRITE|
				        VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_object,
				       &first_pindex, &fs.first_prot,
				       &fs.wired);
		if (result != KERN_SUCCESS) {
			result = KERN_FAILURE;
			goto done;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 *
		 * XXX We have a shared lock, this will have an MP race but
		 * I don't see how it can hurt anything.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.map_generation = fs.map->timestamp;
	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
	fs.shared = 0;
	fs.vp = NULL;

	if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
		if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
			panic("vm_fault: fault on nofault entry, addr: %p",
			      (void *)vaddr);
		}
		if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
		    vaddr >= fs.entry->start &&
		    vaddr < fs.entry->start + PAGE_SIZE) {
			panic("vm_fault: fault on stack guard, addr: %p",
			      (void *)vaddr);
		}
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_object == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
			(void *)vaddr, fs.entry);
	}

	/*
	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
	 * is set.
	 */
	if ((curthread->td_flags & TDF_NOFAULT) &&
	    (retry ||
	     fs.first_object->type == OBJT_VNODE ||
	     fs.first_object->backing_object)) {
		result = KERN_FAILURE;
		unlock_things(&fs);
		goto done2;
	}

	/*
	 * Attempt to shortcut the fault if the lookup returns a
	 * terminal object and the page is present.  This allows us
	 * to obtain a shared token on the object instead of an exclusive
	 * token, which theoretically should allow concurrent faults.
	 *
	 * We cannot acquire a shared token on kernel_map, at least not
	 * on i386, because the i386 pmap code uses the kernel_object for
	 * its page table page management, resulting in a shared->exclusive
	 * sequence which will deadlock.  This will not happen normally
	 * anyway, except on well cached pageable kmem (like pipe buffers),
	 * so it should not impact performance.
	 */
	if (vm_shared_fault &&
	    fs.first_object->backing_object == NULL &&
	    fs.entry->maptype == VM_MAPTYPE_NORMAL &&
	    fs.map != &kernel_map) {
		int error;
		vm_object_hold_shared(fs.first_object);
		/*fs.vp = vnode_pager_lock(fs.first_object);*/
		fs.m = vm_page_lookup_busy_try(fs.first_object,
						first_pindex,
						TRUE, &error);
		if (error == 0 && fs.m) {
			/*
			 * Activate the page and figure out if we can
			 * short-cut a quick mapping.
			 *
			 * WARNING!  We cannot call swap_pager_unswapped()
			 *	     with a shared token!  Note that we
			 *	     have to test fs.first_prot here.
			 */
			vm_page_activate(fs.m);
			if (fs.m->valid == VM_PAGE_BITS_ALL &&
			    ((fs.m->flags & PG_SWAPPED) == 0 ||
			     (fs.first_prot & VM_PROT_WRITE) == 0 ||
			     (fs.fault_flags & VM_FAULT_DIRTY) == 0)) {
				fs.lookup_still_valid = TRUE;
				fs.first_m = NULL;
				fs.object = fs.first_object;
				fs.prot = fs.first_prot;
				if (fs.wired)
					fault_type = fs.first_prot;
				if (fs.prot & VM_PROT_WRITE) {
					vm_object_set_writeable_dirty(
							fs.m->object);
					vm_set_nosync(fs.m, fs.entry);
					if (fs.fault_flags & VM_FAULT_DIRTY) {
						vm_page_dirty(fs.m);
						/*XXX*/
						swap_pager_unswapped(fs.m);
					}
				}
				result = KERN_SUCCESS;
				fault_flags |= VM_FAULT_BURST_QUICK;
				fault_flags &= ~VM_FAULT_BURST;
				++vm_shared_hit;
				goto quick;
			}
			vm_page_wakeup(fs.m);
			fs.m = NULL;
		}
		vm_object_drop(fs.first_object); /* XXX drop on shared tok? */
	}
	++vm_shared_miss;

	/*
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	vm_object_hold(fs.first_object);
	if (fs.vp == NULL)
		fs.vp = vnode_pager_lock(fs.first_object);

#if 0
	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
	fs.shared = 0;
#endif

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wired)
		fault_type = fs.first_prot;

	/*
	 * The page we want is at (first_object, first_pindex), but if the
	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
	 * page table to figure out the actual pindex.
	 *
	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 * ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type, 1);
		if (result == KERN_TRY_AGAIN) {
			vm_object_drop(fs.first_object);
			++retry;
			goto RetryFault;
		}
		if (result != KERN_SUCCESS)
			goto done;
	}

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object
	 *
	 * vm_fault_object will set fs->prot for the pmap operation.  It is
	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
	 * page can be safely written.  However, it will force a read-only
	 * mapping for a read fault if the memory is managed by a virtual
	 * page table.
	 *
	 * If the fault code uses the shared object lock shortcut
	 * we must not try to burst (we can't allocate VM pages).
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);
	if (fs.shared)
		fault_flags &= ~VM_FAULT_BURST;

	if (result == KERN_TRY_AGAIN) {
		vm_object_drop(fs.first_object);
		++retry;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS)
		goto done;

quick:
	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.
	 *
	 * Enter the page into the pmap and do pmap-related adjustments.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, fs.entry);
	mycpu->gd_cnt.v_vm_faults++;
	if (curthread->td_lwp)
		++curthread->td_lwp->lwp_ru.ru_minflt;

	/*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */
	KKASSERT(fs.m->flags & PG_BUSY);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
		if (fs.wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}
	vm_page_wakeup(fs.m);

	/*
	 * Burst in a few more pages if possible.  The fs.map should still
	 * be locked.  To avoid interlocking against a vnode->getblk
	 * operation we had to be sure to unbusy our primary vm_page above
	 * first.
	 */
	if (fault_flags & VM_FAULT_BURST) {
		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0
		    && fs.wired == 0) {
			vm_prefault(fs.map->pmap, vaddr,
				    fs.entry, fs.prot, fault_flags);
		}
	}
	if (fault_flags & VM_FAULT_BURST_QUICK) {
		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0
		    && fs.wired == 0) {
			vm_prefault_quick(fs.map->pmap, vaddr,
					  fs.entry, fs.prot, fault_flags);
		}
	}

	/*
	 * Unlock everything, and return
	 */
	unlock_things(&fs);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*vm_object_deallocate(fs.first_object);*/
	/*fs.m = NULL; */
	/*fs.first_object = NULL; must still drop later */

	result = KERN_SUCCESS;
done:
	if (fs.first_object)
		vm_object_drop(fs.first_object);
done2:
	lwkt_reltoken(&map->token);
	if (lp)
		lp->lwp_flags &= ~LWP_PAGING;
	return (result);
}

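/*
 * Illustrative caller (a hypothetical sketch, not the actual trap code,
 * which lives in the platform-specific trap handler): a user-mode page
 * fault on address 'va' with access type 'ftype' would be resolved
 * along these lines:
 *
 *	int rv;
 *
 *	rv = vm_fault(&curproc->p_vmspace->vm_map, trunc_page(va),
 *		      ftype, VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		...deliver SIGSEGV or SIGBUS to the process...
 */
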
/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
{
	struct lwp *lp = curthread->td_lwp;
	vm_page_t m;

	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
			  fault_type, VM_FAULT_NORMAL, errorp);
	return(m);
}

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
 * and marked PG_REFERENCED as well.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
	      int fault_flags, int *errorp)
{
	vm_pindex_t first_pindex;
	struct faultstate fs;
	int result;
	int retry = 0;
	vm_prot_t orig_fault_type = fault_type;

	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

	lwkt_gettoken(&map->token);

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
	 * if the map entry is a virtual page table and also writable,
	 * so we can set the 'A' (accessed) bit in the virtual page table
	 * entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_object,
			       &first_pindex, &fs.first_prot, &fs.wired);

	if (result != KERN_SUCCESS) {
		*errorp = result;
		fs.m = NULL;
		goto done;
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.map_generation = fs.map->timestamp;
	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
	fs.shared = 0;
	fs.vp = NULL;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_object == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
			(void *)vaddr, fs.entry);
	}

	/*
	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
	 * is set.
	 */
	if ((curthread->td_flags & TDF_NOFAULT) &&
	    (retry ||
	     fs.first_object->type == OBJT_VNODE ||
	     fs.first_object->backing_object)) {
		*errorp = KERN_FAILURE;
		unlock_things(&fs);
		goto done2;
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	vm_object_hold(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);

#if 0
	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
	fs.shared = 0;
#endif

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wired)
		fault_type = fs.first_prot;

	/*
	 * The page we want is at (first_object, first_pindex), but if the
	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
	 * page table to figure out the actual pindex.
	 *
	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 * ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type, 1);
		if (result == KERN_TRY_AGAIN) {
			vm_object_drop(fs.first_object);
			++retry;
			goto RetryFault;
		}
		if (result != KERN_SUCCESS) {
			*errorp = result;
			fs.m = NULL;
			goto done;
		}
	}

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object
	 */
	fs.m = NULL;
	result = vm_fault_object(&fs, first_pindex, fault_type, 1);

	if (result == KERN_TRY_AGAIN) {
		vm_object_drop(fs.first_object);
		++retry;
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		*errorp = result;
		fs.m = NULL;
		goto done;
	}

	if ((orig_fault_type & VM_PROT_WRITE) &&
	    (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_and_deallocate(&fs);
		fs.m = NULL;
		goto done;
	}

	/*
	 * DO NOT UPDATE THE PMAP!!!  This function may be called for
	 * a pmap unrelated to the current process pmap, in which case
	 * the current cpu core will not be listed in the pmap's pm_active
	 * mask.  Thus invalidation interlocks will fail to work properly.
	 *
	 * (for example, 'ps' uses procfs to read program arguments from
	 * each process's stack).
	 *
	 * In addition to the above this function will be called to acquire
	 * a page that might already be faulted in, re-faulting it
	 * continuously is a waste of time.
	 *
	 * XXX could this have been the cause of our random seg-fault
	 *     issues?  procfs accesses user stacks.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);
#if 0
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, NULL);
	mycpu->gd_cnt.v_vm_faults++;
	if (curthread->td_lwp)
		++curthread->td_lwp->lwp_ru.ru_minflt;
#endif

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.  So we must unlock here after having
	 * messed with the pmap.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	vm_page_hold(fs.m);
	vm_page_activate(fs.m);
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.m);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.m);
	/*vm_object_deallocate(fs.first_object);*/
	/*fs.first_object = NULL; */
	*errorp = 0;

done:
	if (fs.first_object)
		vm_object_drop(fs.first_object);
done2:
	lwkt_reltoken(&map->token);
	return(fs.m);
}

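/*
 * Hypothetical consumer sketch (procfs-style access to another process's
 * memory is the kind of caller this is written for; 'otherp' and 'uva'
 * are illustrative names, not an existing API):
 *
 *	vm_page_t m;
 *	int error;
 *
 *	m = vm_fault_page(&otherp->p_vmspace->vm_map, trunc_page(uva),
 *			  VM_PROT_READ, VM_FAULT_NORMAL, &error);
 *	if (m) {
 *		...copy data out via a temporary kernel mapping...
 *		vm_page_unhold(m);
 *	}
 */
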
/*
 * Fault in the specified (object,offset) and dirty the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and *errorp is set.
 *
 * A held (but not busied) page is returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
		     vm_prot_t fault_type, int fault_flags,
		     int shared, int *errorp)
{
	int result;
	vm_pindex_t first_pindex;
	struct faultstate fs;
	struct vm_map_entry entry;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	bzero(&entry, sizeof(entry));
	entry.object.vm_object = object;
	entry.maptype = VM_MAPTYPE_NORMAL;
	entry.protection = entry.max_protection = fault_type;

	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.map = NULL;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

RetryFault:

	fs.first_object = object;
	first_pindex = OFF_TO_IDX(offset);
	fs.entry = &entry;
	fs.first_prot = fault_type;
	fs.wired = 0;
	fs.shared = shared;
	/*fs.map_generation = 0; unused */

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * The reference should also prevent an unexpected collapse of the
	 * parent that might move pages from the current object into the
	 * parent unexpectedly, resulting in corruption.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	fs.vp = vnode_pager_lock(fs.first_object);

	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */

#if 0
	/* XXX future - ability to operate on VM object using vpagetable */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type, 0);
		if (result == KERN_TRY_AGAIN)
			goto RetryFault;
		if (result != KERN_SUCCESS) {
			*errorp = result;
			return (NULL);
		}
	}
#endif

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type, 0);

	if (result == KERN_TRY_AGAIN)
		goto RetryFault;
	if (result != KERN_SUCCESS) {
		*errorp = result;
		return(NULL);
	}

	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_and_deallocate(&fs);
		return(NULL);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, so we
	 * do it here.  Note that the returned fs.m will be busied.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	vm_page_hold(fs.m);
	vm_page_activate(fs.m);
	if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
		vm_page_dirty(fs.m);
	if (fault_flags & VM_FAULT_UNSWAP)
		swap_pager_unswapped(fs.m);

	/*
	 * Indicate that the page was accessed.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.m);
	/*vm_object_deallocate(fs.first_object);*/
	/*fs.first_object = NULL; */

	*errorp = 0;
	return(fs.m);
}

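/*
 * Usage sketch (hypothetical; 'obj' and 'off' are illustrative).  The
 * object token must already be held, per the assertion at the top of
 * vm_fault_object_page(), and the returned page is held but not busied:
 *
 *	vm_page_t m;
 *	int error;
 *
 *	m = vm_fault_object_page(obj, off, VM_PROT_READ,
 *				 VM_FAULT_NORMAL, 0, &error);
 *	if (m) {
 *		...use the held page...
 *		vm_page_unhold(m);
 *	}
 */
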
/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.  A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
		    vpte_t vpte, int fault_type, int allow_nofault)
{
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
	int result = KERN_SUCCESS;
	vpte_t *ptep;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
	for (;;) {
		/*
		 * We cannot proceed if the vpte is not valid, not readable
		 * for a read fault, or not writable for a write fault.
		 */
		if ((vpte & VPTE_V) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((vpte & VPTE_PS) || vshift == 0)
			break;
		KKASSERT(vshift >= VPTE_PAGE_BITS);

		/*
		 * Get the page table page.  Nominally we only read the page
		 * table, but since we are actively setting VPTE_M and VPTE_A,
		 * tell vm_fault_object() that we are writing it.
		 *
		 * There is currently no real need to optimize this.
		 */
		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
					 VM_PROT_READ|VM_PROT_WRITE,
					 allow_nofault);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * Process the returned fs.m and look up the page table
		 * entry in the page table page.
		 */
		vshift -= VPTE_PAGE_BITS;
		lwb = lwbuf_alloc(fs->m, &lwb_cache);
		ptep = ((vpte_t *)lwbuf_kva(lwb) +
		        ((*pindex >> vshift) & VPTE_PAGE_MASK));
		vpte = *ptep;

		/*
		 * Page table write-back.  If the vpte is valid for the
		 * requested operation, do a write-back to the page table.
		 *
		 * XXX VPTE_M is not set properly for page directory pages.
		 * It doesn't get set in the page directory if the page table
		 * is modified during a read access.
		 */
		vm_page_activate(fs->m);
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
		    (vpte & VPTE_W)) {
			if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
				atomic_set_long(ptep, VPTE_M | VPTE_A);
				vm_page_dirty(fs->m);
			}
		}
		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
		    (vpte & VPTE_R)) {
			if ((vpte & VPTE_A) == 0) {
				atomic_set_long(ptep, VPTE_A);
				vm_page_dirty(fs->m);
			}
		}
		lwbuf_free(lwb);
		vm_page_flag_set(fs->m, PG_REFERENCED);
		vm_page_wakeup(fs->m);
		fs->m = NULL;
		cleanup_successful_fault(fs);
	}
	/*
	 * Combine remaining address bits with the vpte.
	 */
	/* JG how many bits from each? */
	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
		  (*pindex & ((1L << vshift) - 1));
	return (KERN_SUCCESS);
}

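/*
 * Worked example (illustrative; assumes 4K pages and VPTE_PAGE_BITS == 9,
 * i.e. 512 8-byte vpte_t entries per page table page): each level of the
 * walk above consumes 9 bits of *pindex to index a table page.  If a
 * level sets VPTE_PS the scan terminates early and the remaining low
 * vshift bits of *pindex are added to the page number taken from
 * VPTE_FRAME, which is exactly what the final *pindex computation does.
 */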

/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 *
 * fs->first_object must be held on call.
 */
static
int
vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
		vm_prot_t fault_type, int allow_nofault)
{
	vm_object_t next_object;
	vm_pindex_t pindex;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
	fs->prot = fs->first_prot;
	fs->object = fs->first_object;
	pindex = first_pindex;

	vm_object_chain_acquire(fs->first_object);
	vm_object_pip_add(fs->first_object, 1);

	/*
	 * If a read fault occurs we try to make the page writable if
	 * possible.  There are three cases where we cannot make the
	 * page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the mapping is a virtual page table we need to be able
	 *     to detect writes so we can set VPTE_M in the virtual page
	 *     table.
	 *
	 * (3) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * VM_PROT_VPAGED is set if faulting via a virtual page table and
	 * causes adjustments to the 'M'odify bit to also turn off write
	 * access to force a re-fault.
	 */
	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	/* vm_object_hold(fs->object); implied b/c object == first_object */

	for (;;) {
		/*
		 * The entire backing chain from first_object to object
		 * inclusive is chainlocked.
		 *
		 * If the object is dead, we stop here
		 *
		 * vm_shared_fault (fs->shared != 0) case: nothing special.
		 */
		if (fs->object->flags & OBJ_DEAD) {
			vm_object_pip_wakeup(fs->first_object);
			vm_object_chain_release_all(fs->first_object,
						    fs->object);
			if (fs->object != fs->first_object)
				vm_object_drop(fs->object);
			unlock_and_deallocate(fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if the page is resident.  Wait/Retry if the page is
		 * busy (lots of stuff may have changed so we can't continue
		 * in that case).
		 *
		 * We can theoretically allow the soft-busy case on a read
		 * fault if the page is marked valid, but since such
		 * pages are typically already pmap'd, putting that
		 * special case in might be more effort than it is
		 * worth.  We cannot under any circumstances mess
		 * around with a vm_page_t->busy page except, perhaps,
		 * to pmap it.
		 *
		 * vm_shared_fault (fs->shared != 0) case:
		 *	error		nothing special
		 *	fs->m		relock excl if I/O needed
		 *	NULL		relock excl
		 */
		fs->m = vm_page_lookup_busy_try(fs->object, pindex,
						TRUE, &error);
		if (error) {
			vm_object_pip_wakeup(fs->first_object);
			vm_object_chain_release_all(fs->first_object,
						    fs->object);
			if (fs->object != fs->first_object)
				vm_object_drop(fs->object);
			unlock_things(fs);
			vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
			mycpu->gd_cnt.v_intrans++;
			/*vm_object_deallocate(fs->first_object);*/
			/*fs->first_object = NULL;*/
			fs->m = NULL;
			return (KERN_TRY_AGAIN);
		}
		if (fs->m) {
			/*
			 * The page is busied for us.
			 *
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
			 */
			int queue = fs->m->queue;
			vm_page_unqueue_nowakeup(fs->m);

			if ((queue - fs->m->pc) == PQ_CACHE &&
			    vm_page_count_severe()) {
				vm_page_activate(fs->m);
				vm_page_wakeup(fs->m);
				fs->m = NULL;
				vm_object_pip_wakeup(fs->first_object);
				vm_object_chain_release_all(fs->first_object,
							    fs->object);
				if (fs->object != fs->first_object)
					vm_object_drop(fs->object);
				unlock_and_deallocate(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					vm_wait_pfault();
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If it still isn't completely valid (readable),
			 * or if a read-ahead-mark is set on the VM page,
			 * jump to readrest, else we found the page and
			 * can return.
			 *
			 * We can release the spl once we have marked the
			 * page busy.
			 */
			if (fs->m->object != &kernel_object) {
				if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
				    VM_PAGE_BITS_ALL) {
					if (fs->shared) {
						vm_object_drop(fs->object);
						vm_object_hold(fs->object);
						fs->shared = 0;
					}
					goto readrest;
				}
				if (fs->m->flags & PG_RAM) {
					if (debug_cluster)
						kprintf("R");
					vm_page_flag_clear(fs->m, PG_RAM);
					if (fs->shared) {
						vm_object_drop(fs->object);
						vm_object_hold(fs->object);
						fs->shared = 0;
					}
					goto readrest;
				}
			}
			break; /* break to PAGE HAS BEEN FOUND */
		}

		if (fs->shared) {
			vm_object_drop(fs->object);
			vm_object_hold(fs->object);
			fs->shared = 0;
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER(fs) || fs->object == fs->first_object) {
			/*
			 * If the page is beyond the object size we fail
			 */
			if (pindex >= fs->object->size) {
				vm_object_pip_wakeup(fs->first_object);
				vm_object_chain_release_all(fs->first_object,
							    fs->object);
				if (fs->object != fs->first_object)
					vm_object_drop(fs->object);
				unlock_and_deallocate(fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * It is possible for the allocation to race, so
			 * handle the case.
			 */
			fs->m = NULL;
			if (!vm_page_count_severe()) {
				fs->m = vm_page_alloc(fs->object, pindex,
				    ((fs->vp || fs->object->backing_object) ?
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
					VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
			}
			if (fs->m == NULL) {
				vm_object_pip_wakeup(fs->first_object);
				vm_object_chain_release_all(fs->first_object,
							    fs->object);
				if (fs->object != fs->first_object)
					vm_object_drop(fs->object);
				unlock_and_deallocate(fs);
				if (allow_nofault == 0 ||
				    (curthread->td_flags & TDF_NOFAULT) == 0) {
					vm_wait_pfault();
				}
				return (KERN_TRY_AGAIN);
			}

			/*
			 * Fall through to readrest.  We have a new page which
			 * will have to be paged (since m->valid will be 0).
			 */
		}

readrest:
		/*
		 * We have found an invalid or partially valid page, a
		 * page with a read-ahead mark which might be partially or
		 * fully valid (and maybe dirty too), or we have allocated
		 * a new page.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 *
		 * If TRYPAGER is true then fs.m will be non-NULL and busied
		 * for us.
		 */
		if (TRYPAGER(fs)) {
			int rv;
			int seqaccess;
			u_char behavior = vm_map_entry_behavior(fs->entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
				seqaccess = 0;
			else
				seqaccess = -1;

#if 0
			/*
			 * If sequential access is detected then attempt
			 * to deactivate/cache pages behind the scan to
			 * prevent resource hogging.
			 *
			 * Use of PG_RAM to detect sequential access
			 * also simulates multi-zone sequential access
			 * detection for free.
			 *
			 * NOTE: Partially valid dirty pages cannot be
			 *	 deactivated without causing NFS piecemeal
			 *	 writes to barf.
			 */
			if ((fs->first_object->type != OBJT_DEVICE) &&
			    (fs->first_object->type != OBJT_MGTDEVICE) &&
			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
				(behavior != MAP_ENTRY_BEHAV_RANDOM &&
				 (fs->m->flags & PG_RAM)))
			) {
				vm_pindex_t scan_pindex;
				int scan_count = 16;

				if (first_pindex < 16) {
					scan_pindex = 0;
					scan_count = 0;
				} else {
					scan_pindex = first_pindex - 16;
					if (scan_pindex < 16)
						scan_count = scan_pindex;
					else
						scan_count = 16;
				}

				while (scan_count) {
					vm_page_t mt;

					mt = vm_page_lookup(fs->first_object,
							    scan_pindex);
					if (mt == NULL)
						break;
					if (vm_page_busy_try(mt, TRUE))
						goto skip;

					if (mt->valid != VM_PAGE_BITS_ALL) {
						vm_page_wakeup(mt);
						break;
					}
					if ((mt->flags &
					     (PG_FICTITIOUS | PG_UNMANAGED |
					      PG_NEED_COMMIT)) ||
					    mt->hold_count ||
					    mt->wire_count)  {
						vm_page_wakeup(mt);
						goto skip;
					}
					if (mt->dirty == 0)
						vm_page_test_dirty(mt);
					if (mt->dirty) {
						vm_page_protect(mt,
								VM_PROT_NONE);
						vm_page_deactivate(mt);
						vm_page_wakeup(mt);
					} else {
						vm_page_cache(mt);
					}
skip:
					--scan_count;
					--scan_pindex;
				}

				seqaccess = 1;
			}
#endif

			/*
			 * Avoid deadlocking against the map when doing I/O.
			 * fs.object is held and the page is PG_BUSY'd.
			 *
			 * NOTE: Once unlocked, fs->entry can become stale
			 *	 so this will NULL it out.
			 *
			 * NOTE: fs->entry is invalid until we relock the
			 *	 map and verify that the timestamp has not
			 *	 changed.
			 */
			unlock_map(fs);

			/*
			 * Acquire the page data.  We still hold a ref on
			 * fs.object and the page has been PG_BUSY'd.
			 *
			 * The pager may replace the page (for example, in
			 * order to enter a fictitious page into the
			 * object).  If it does so it is responsible for
			 * cleaning up the passed page and properly setting
			 * the new page PG_BUSY.
			 *
			 * If we got here through a PG_RAM read-ahead
			 * mark the page may be partially dirty and thus
			 * not freeable.  Don't bother checking to see
			 * if the pager has the page because we can't free
			 * it anyway.  We have to depend on the get_page
			 * operation filling in any gaps whether there is
			 * backing store or not.
			 */
			rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);

			if (rv == VM_PAGER_OK) {
				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 *
				 * XXX other code segments do relookups too.
				 * It's a bad abstraction that needs to be
				 * fixed/removed.
				 */
				fs->m = vm_page_lookup(fs->object, pindex);
				if (fs->m == NULL) {
					vm_object_pip_wakeup(fs->first_object);
					vm_object_chain_release_all(
						fs->first_object, fs->object);
					if (fs->object != fs->first_object)
						vm_object_drop(fs->object);
					unlock_and_deallocate(fs);
					return (KERN_TRY_AGAIN);
				}

				++fs->hardfault;
				break; /* break to PAGE HAS BEEN FOUND */
			}

			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR) {
				if (curproc) {
					kprintf("vm_fault: pager read error, "
						"pid %d (%s)\n",
						curproc->p_pid,
						curproc->p_comm);
				} else {
					kprintf("vm_fault: pager read error, "
						"thread %p (%s)\n",
						curthread,
						curthread->td_comm);
				}
			}

			/*
			 * Data outside the range of the pager or an I/O error
			 *
			 * The page may have been wired during the pagein,
			 * e.g. by the buffer cache, and cannot simply be
			 * freed.  Call vnode_pager_freepage() to deal with it.
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs->map != &kernel_map) &&
			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
				vnode_pager_freepage(fs->m);
				fs->m = NULL;
				vm_object_pip_wakeup(fs->first_object);
				vm_object_chain_release_all(fs->first_object,
							    fs->object);
				if (fs->object != fs->first_object)
					vm_object_drop(fs->object);
				unlock_and_deallocate(fs);
				if (rv == VM_PAGER_ERROR)
					return (KERN_FAILURE);
				else
					return (KERN_PROTECTION_FAILURE);
				/* NOT REACHED */
			}
			if (fs->object != fs->first_object) {
				vnode_pager_freepage(fs->m);
				fs->m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}
1572 
1573 		/*
1574 		 * We get here if the object has a default pager (or unwiring)
1575 		 * or the pager doesn't have the page.
1576 		 */
1577 		if (fs->object == fs->first_object)
1578 			fs->first_m = fs->m;
1579 
1580 		/*
1581 		 * Move on to the next object.  The chain lock should prevent
1582 		 * the backing_object from getting ripped out from under us.
1583 		 *
1584 		 * vm_shared_fault case:
1585 		 *
1586 		 *	If the next object is the last object and
1587 		 *	vnode-backed (thus possibly shared), we can try a
1588 		 *	shared object lock.  There is no 'chain' for this
1589 		 *	last object if vnode-backed (otherwise we would
1590 		 *	need an exclusive lock).
1591 		 *
1592 		 *	fs->shared mode is very fragile and only works
1593 		 *	under certain specific conditions, and is only
1594 		 *	handled for those conditions in our loop.  Essentially
1595 		 *	it is designed only to be able to 'dip into' the
1596 		 *	vnode's object and extract an already-cached page.
1597 		 */
1598 		fs->shared = 0;
1599 		if ((next_object = fs->object->backing_object) != NULL) {
1600 			fs->shared = vm_object_hold_maybe_shared(next_object);
1601 			vm_object_chain_acquire(next_object);
1602 			KKASSERT(next_object == fs->object->backing_object);
1603 			pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1604 		}
1605 
1606 		if (next_object == NULL) {
1607 			/*
1608 			 * If there's no object left, fill the page in the top
1609 			 * object with zeros.
1610 			 */
1611 			if (fs->object != fs->first_object) {
1612 				if (fs->first_object->backing_object !=
1613 				    fs->object) {
1614 					vm_object_hold(fs->first_object->backing_object);
1615 				}
1616 				vm_object_chain_release_all(
1617 					fs->first_object->backing_object,
1618 					fs->object);
1619 				if (fs->first_object->backing_object !=
1620 				    fs->object) {
1621 					vm_object_drop(fs->first_object->backing_object);
1622 				}
1623 				vm_object_pip_wakeup(fs->object);
1624 				vm_object_drop(fs->object);
1625 				fs->object = fs->first_object;
1626 				pindex = first_pindex;
1627 				fs->m = fs->first_m;
1628 			}
1629 			fs->first_m = NULL;
1630 
1631 			/*
1632 			 * Zero the page if necessary and mark it valid.
1633 			 */
1634 			if ((fs->m->flags & PG_ZERO) == 0) {
1635 				vm_page_zero_fill(fs->m);
1636 			} else {
1637 #ifdef PMAP_DEBUG
1638 				pmap_page_assertzero(VM_PAGE_TO_PHYS(fs->m));
1639 #endif
1640 				vm_page_flag_clear(fs->m, PG_ZERO);
1641 				mycpu->gd_cnt.v_ozfod++;
1642 			}
1643 			mycpu->gd_cnt.v_zfod++;
1644 			fs->m->valid = VM_PAGE_BITS_ALL;
1645 			break;	/* break to PAGE HAS BEEN FOUND */
1646 		}
1647 		if (fs->object != fs->first_object) {
1648 			vm_object_pip_wakeup(fs->object);
1649 			vm_object_lock_swap();
1650 			vm_object_drop(fs->object);
1651 		}
1652 		KASSERT(fs->object != next_object,
1653 			("object loop %p", next_object));
1654 		fs->object = next_object;
1655 		vm_object_pip_add(fs->object, 1);
1656 	}
1657 
1658 	/*
1659 	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1660 	 * is held.]
1661 	 *
1662 	 * object still held.
1663 	 *
1664 	 * If the page is being written, but isn't already owned by the
1665 	 * top-level object, we have to copy it into a new page owned by the
1666 	 * top-level object.
1667 	 */
1668 	KASSERT((fs->m->flags & PG_BUSY) != 0,
1669 		("vm_fault: not busy after main loop"));
1670 
1671 	if (fs->object != fs->first_object) {
1672 		/*
1673 		 * We only really need to copy if we want to write it.
1674 		 */
1675 		if (fault_type & VM_PROT_WRITE) {
1676 			/*
1677 			 * This allows pages to be virtually copied from a
1678 			 * backing_object into the first_object, where the
1679 			 * backing object has no other refs to it, and cannot
1680 			 * gain any more refs.  Instead of a bcopy, we just
1681 			 * move the page from the backing object to the
1682 			 * first object.  Note that we must mark the page
1683 			 * dirty in the first object so that it will go out
1684 			 * to swap when needed.
1685 			 */
1686 			if (
1687 				/*
1688 				 * Map, if present, has not changed
1689 				 */
1690 				(fs->map == NULL ||
1691 				fs->map_generation == fs->map->timestamp) &&
1692 				/*
1693 				 * Only one shadow object
1694 				 */
1695 				(fs->object->shadow_count == 1) &&
1696 				/*
1697 				 * No COW refs, except us
1698 				 */
1699 				(fs->object->ref_count == 1) &&
1700 				/*
1701 				 * No one else can look this object up
1702 				 */
1703 				(fs->object->handle == NULL) &&
1704 				/*
1705 				 * No other ways to look the object up
1706 				 */
1707 				((fs->object->type == OBJT_DEFAULT) ||
1708 				 (fs->object->type == OBJT_SWAP)) &&
1709 				/*
1710 				 * We don't chase down the shadow chain
1711 				 */
1712 				(fs->object == fs->first_object->backing_object) &&
1713 
1714 				/*
1715 				 * grab the lock if we need to
1716 				 */
1717 				(fs->lookup_still_valid ||
1718 				 fs->map == NULL ||
1719 				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1720 			    ) {
1721 				/*
1722 				 * (first_m) and (m) are both busied.  We have
1723 				 * to move (m) into (first_m)'s object/pindex
1724 				 * in an atomic fashion, then free (first_m).
1725 				 *
1726 				 * first_object is held, so removing (first_m)
1727 				 * followed by the rename of (m) should wind
1728 				 * up being atomic.  vm_page_free() might
1729 				 * block so we don't do it until after the
1730 				 * rename.
1731 				 */
1732 				fs->lookup_still_valid = 1;
1733 				vm_page_protect(fs->first_m, VM_PROT_NONE);
1734 				vm_page_remove(fs->first_m);
1735 				vm_page_rename(fs->m, fs->first_object,
1736 					       first_pindex);
1737 				vm_page_free(fs->first_m);
1738 				fs->first_m = fs->m;
1739 				fs->m = NULL;
1740 				mycpu->gd_cnt.v_cow_optim++;
1741 			} else {
1742 				/*
1743 				 * Oh, well, let's copy it.
1744 				 *
1745 				 * Why are we unmapping the original page
1746 				 * here?  Well, in short, not all accessors
1747 				 * of user memory go through the pmap.  The
1748 				 * procfs code doesn't access user memory
1749 				 * via a local pmap, so vm_fault_page*()
1750 				 * can't call pmap_enter().  And the umtx*()
1751 				 * code may modify the COW'd page via a DMAP
1752 				 * or kernel mapping and not via the pmap,
1753 				 * leaving the original page still mapped
1754 				 * read-only into the pmap.
1755 				 *
1756 				 * So we have to remove the page from at
1757 				 * least the current pmap if it is in it.
1758 				 * Just remove it from all pmaps.
1759 				 */
1760 				vm_page_copy(fs->m, fs->first_m);
1761 				vm_page_protect(fs->m, VM_PROT_NONE);
1762 				vm_page_event(fs->m, VMEVENT_COW);
1763 			}
1764 
1765 			if (fs->m) {
1766 				/*
1767 				 * We no longer need the old page or object.
1768 				 */
1769 				release_page(fs);
1770 			}
1771 
1772 			/*
1773 			 * We intend to revert to first_object, undo the
1774 			 * chain lock through to that.
1775 			 */
1776 			if (fs->first_object->backing_object != fs->object)
1777 				vm_object_hold(fs->first_object->backing_object);
1778 			vm_object_chain_release_all(
1779 					fs->first_object->backing_object,
1780 					fs->object);
1781 			if (fs->first_object->backing_object != fs->object)
1782 				vm_object_drop(fs->first_object->backing_object);
1783 
1784 			/*
1785 			 * fs->object != fs->first_object due to above
1786 			 * conditional
1787 			 */
1788 			vm_object_pip_wakeup(fs->object);
1789 			vm_object_drop(fs->object);
1790 
1791 			/*
1792 			 * Only use the new page below...
1793 			 */
1794 
1795 			mycpu->gd_cnt.v_cow_faults++;
1796 			fs->m = fs->first_m;
1797 			fs->object = fs->first_object;
1798 			pindex = first_pindex;
1799 		} else {
1800 			/*
1801 			 * If it wasn't a write fault avoid having to copy
1802 			 * the page by mapping it read-only.
1803 			 */
1804 			fs->prot &= ~VM_PROT_WRITE;
1805 		}
1806 	}
1807 
1808 	/*
1809 	 * Relock the map if necessary, then check the generation count.
1810 	 * Comparing the map's timestamp against fs->map_generation
1811 	 * below detects any map changes made while we were unlocked.
1812 	 *
1813 	 * If the count has changed after relocking then all sorts of
1814 	 * crap may have happened and we have to retry.
1815 	 *
1816 	 * NOTE: The relock_map() can fail due to a deadlock against
1817 	 *	 the vm_page we are holding BUSY.
1818 	 */
1819 	if (fs->lookup_still_valid == FALSE && fs->map) {
1820 		if (relock_map(fs) ||
1821 		    fs->map->timestamp != fs->map_generation) {
1822 			release_page(fs);
1823 			vm_object_pip_wakeup(fs->first_object);
1824 			vm_object_chain_release_all(fs->first_object,
1825 						    fs->object);
1826 			if (fs->object != fs->first_object)
1827 				vm_object_drop(fs->object);
1828 			unlock_and_deallocate(fs);
1829 			return (KERN_TRY_AGAIN);
1830 		}
1831 	}
1832 
1833 	/*
1834 	 * If the fault is a write, we know that this page is being
1835 	 * written NOW so dirty it explicitly to save on pmap_is_modified()
1836 	 * calls later.
1837 	 *
1838 	 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1839 	 * when the page is already dirty, so that data written earlier
1840 	 * with the expectation of being synced still gets synced.
1841 	 * Likewise if this entry does not request NOSYNC then make
1842 	 * sure the page isn't marked NOSYNC.  Applications sharing
1843 	 * data should use the same flags to avoid ping-ponging.
1844 	 *
1845 	 * Also tell the backing pager, if any, that it should remove
1846 	 * any swap backing since the page is now dirty.
1847 	 */
1848 	vm_page_activate(fs->m);
1849 	if (fs->prot & VM_PROT_WRITE) {
1850 		vm_object_set_writeable_dirty(fs->m->object);
1851 		vm_set_nosync(fs->m, fs->entry);
1852 		if (fs->fault_flags & VM_FAULT_DIRTY) {
1853 			vm_page_dirty(fs->m);
1854 			swap_pager_unswapped(fs->m);
1855 		}
1856 	}
1857 
1858 	vm_object_pip_wakeup(fs->first_object);
1859 	vm_object_chain_release_all(fs->first_object, fs->object);
1860 	if (fs->object != fs->first_object)
1861 		vm_object_drop(fs->object);
1862 
1863 	/*
1864 	 * Page had better still be busy.  We are still locked up and
1865 	 * fs->object will have another PIP reference if it is not equal
1866 	 * to fs->first_object.
1867 	 */
1868 	KASSERT(fs->m->flags & PG_BUSY,
1869 		("vm_fault: page %p not busy!", fs->m));
1870 
1871 	/*
1872 	 * Sanity check: page must be completely valid or it is not fit to
1873 	 * map into user space.  vm_pager_get_pages() ensures this.
1874 	 */
1875 	if (fs->m->valid != VM_PAGE_BITS_ALL) {
1876 		vm_page_zero_invalid(fs->m, TRUE);
1877 		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1878 	}
1879 	vm_page_flag_clear(fs->m, PG_ZERO);
1880 
1881 	return (KERN_SUCCESS);
1882 }
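
#if 0
/*
 * Editor's sketch (not compiled): the rename-vs-copy eligibility test in
 * vm_fault_object() above, factored into a hypothetical predicate so the
 * individual conditions are easier to see.  The in-line test, which also
 * opportunistically acquires the map lock, is authoritative.
 */
static __inline int
vm_fault_can_rename_example(struct faultstate *fs)
{
	return ((fs->map == NULL ||
		 fs->map_generation == fs->map->timestamp) &&
		fs->object->shadow_count == 1 &&	/* only one shadow */
		fs->object->ref_count == 1 &&		/* no other COW refs */
		fs->object->handle == NULL &&		/* not name-able */
		(fs->object->type == OBJT_DEFAULT ||
		 fs->object->type == OBJT_SWAP) &&	/* anonymous backing */
		fs->object == fs->first_object->backing_object);
}
#endif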
1883 
1884 /*
1885  * Hold each of the physical pages that are mapped by the specified range of
1886  * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
1887  * and allow the specified types of access, "prot".  If all of the implied
1888  * pages are successfully held, then the number of held pages is returned
1889  * together with pointers to those pages in the array "ma".  However, if any
1890  * of the pages cannot be held, -1 is returned.
1891  */
1892 int
1893 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
1894     vm_prot_t prot, vm_page_t *ma, int max_count)
1895 {
1896 	vm_offset_t start, end;
1897 	int i, npages, error;
1898 
1899 	start = trunc_page(addr);
1900 	end = round_page(addr + len);
1901 
1902 	npages = howmany(end - start, PAGE_SIZE);
1903 
1904 	if (npages > max_count)
1905 		return -1;
1906 
	for (i = 0; i < npages; i++) {
		ma[i] = vm_fault_page_quick(start + (i * PAGE_SIZE),
					    prot, &error);
		if (ma[i] == NULL) {
			/*
			 * Could not hold this page.  Drop the pages we
			 * already hold and fail per the contract above.
			 */
			while (--i >= 0)
				vm_page_unhold(ma[i]);
			return -1;
		}
	}
1913 
1914 	return npages;
1915 }
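
#if 0
/*
 * Editor's usage sketch (not compiled): holding the pages behind a user
 * buffer across an operation.  All names below are hypothetical, and the
 * assumption is that each page returned by vm_fault_page_quick() is held
 * and is released with vm_page_unhold().
 */
static int
example_hold_user_pages(vm_map_t map, vm_offset_t uva, vm_size_t len)
{
	vm_page_t ma[16];
	int n;

	n = vm_fault_quick_hold_pages(map, uva, len,
				      VM_PROT_READ | VM_PROT_WRITE, ma, 16);
	if (n < 0)
		return (EFAULT);
	/* ... safely access the physical pages, e.g. for I/O ... */
	while (--n >= 0)
		vm_page_unhold(ma[n]);
	return (0);
}
#endif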
1916 
1917 /*
1918  * Wire down a range of virtual addresses in a map.  The entry in question
1919  * should be marked in-transition and the map must be locked.  We must
1920  * release the map temporarily while faulting-in the page to avoid a
1921  * deadlock.  Note that the entry may be clipped while we are blocked but
1922  * will never be freed.
1923  *
1924  * No requirements.
1925  */
1926 int
1927 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1928 {
1929 	boolean_t fictitious;
1930 	vm_offset_t start;
1931 	vm_offset_t end;
1932 	vm_offset_t va;
1933 	vm_paddr_t pa;
1934 	vm_page_t m;
1935 	pmap_t pmap;
1936 	int rv;
1937 
1938 	lwkt_gettoken(&map->token);
1939 
1940 	pmap = vm_map_pmap(map);
1941 	start = entry->start;
1942 	end = entry->end;
1943 	fictitious = entry->object.vm_object &&
1944 			((entry->object.vm_object->type == OBJT_DEVICE) ||
1945 			 (entry->object.vm_object->type == OBJT_MGTDEVICE));
1946 	if (entry->eflags & MAP_ENTRY_KSTACK)
1947 		start += PAGE_SIZE;
1948 	map->timestamp++;
1949 	vm_map_unlock(map);
1950 
1951 	/*
1952 	 * We simulate a fault to get the page and enter it in the physical
1953 	 * map.
1954 	 */
1955 	for (va = start; va < end; va += PAGE_SIZE) {
1956 		if (user_wire) {
1957 			rv = vm_fault(map, va, VM_PROT_READ,
1958 					VM_FAULT_USER_WIRE);
1959 		} else {
1960 			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1961 					VM_FAULT_CHANGE_WIRING);
1962 		}
1963 		if (rv) {
1964 			while (va > start) {
1965 				va -= PAGE_SIZE;
1966 				if ((pa = pmap_extract(pmap, va)) == 0)
1967 					continue;
1968 				pmap_change_wiring(pmap, va, FALSE, entry);
1969 				if (!fictitious) {
1970 					m = PHYS_TO_VM_PAGE(pa);
1971 					vm_page_busy_wait(m, FALSE, "vmwrpg");
1972 					vm_page_unwire(m, 1);
1973 					vm_page_wakeup(m);
1974 				}
1975 			}
1976 			goto done;
1977 		}
1978 	}
1979 	rv = KERN_SUCCESS;
1980 done:
1981 	vm_map_lock(map);
1982 	lwkt_reltoken(&map->token);
1983 	return (rv);
1984 }
1985 
1986 /*
1987  * Unwire a range of virtual addresses in a map.  The map should be
1988  * locked.
1989  */
1990 void
1991 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1992 {
1993 	boolean_t fictitious;
1994 	vm_offset_t start;
1995 	vm_offset_t end;
1996 	vm_offset_t va;
1997 	vm_paddr_t pa;
1998 	vm_page_t m;
1999 	pmap_t pmap;
2000 
2001 	lwkt_gettoken(&map->token);
2002 
2003 	pmap = vm_map_pmap(map);
2004 	start = entry->start;
2005 	end = entry->end;
2006 	fictitious = entry->object.vm_object &&
2007 			((entry->object.vm_object->type == OBJT_DEVICE) ||
2008 			 (entry->object.vm_object->type == OBJT_MGTDEVICE));
2009 	if (entry->eflags & MAP_ENTRY_KSTACK)
2010 		start += PAGE_SIZE;
2011 
2012 	/*
2013 	 * Since the pages are wired down, we must be able to get their
2014 	 * mappings from the physical map system.
2015 	 */
2016 	for (va = start; va < end; va += PAGE_SIZE) {
2017 		pa = pmap_extract(pmap, va);
2018 		if (pa != 0) {
2019 			pmap_change_wiring(pmap, va, FALSE, entry);
2020 			if (!fictitious) {
2021 				m = PHYS_TO_VM_PAGE(pa);
2022 				vm_page_busy_wait(m, FALSE, "vmwupg");
2023 				vm_page_unwire(m, 1);
2024 				vm_page_wakeup(m);
2025 			}
2026 		}
2027 	}
2028 	lwkt_reltoken(&map->token);
2029 }
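
#if 0
/*
 * Editor's note (not compiled): user wiring is ultimately driven by
 * mlock(2), and the unwire path by munlock(2).  A minimal userland
 * sketch, with a hypothetical buffer:
 */
#include <sys/mman.h>
#include <err.h>

static char buf_example[65536];

static void
wire_example(void)
{
	if (mlock(buf_example, sizeof(buf_example)) != 0)
		err(1, "mlock");	/* faults in and wires each page */
	/* ... buf_example cannot be paged out here ... */
	if (munlock(buf_example, sizeof(buf_example)) != 0)
		err(1, "munlock");	/* unwire path */
}
#endif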
2030 
2031 /*
2032  * Copy all of the pages from a wired-down map entry to another.
2033  *
2034  * The source and destination maps must be locked for write.
2035  * The source and destination maps' tokens must be held.
2036  * The source map entry must be wired down (or be a sharing map
2037  * entry corresponding to a main map entry that is wired down).
2038  *
2039  * No other requirements.
2040  *
2041  * XXX do segment optimization
2042  */
2043 void
2044 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
2045 		    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
2046 {
2047 	vm_object_t dst_object;
2048 	vm_object_t src_object;
2049 	vm_ooffset_t dst_offset;
2050 	vm_ooffset_t src_offset;
2051 	vm_prot_t prot;
2052 	vm_offset_t vaddr;
2053 	vm_page_t dst_m;
2054 	vm_page_t src_m;
2055 
2056 	src_object = src_entry->object.vm_object;
2057 	src_offset = src_entry->offset;
2058 
2059 	/*
2060 	 * Create the top-level object for the destination entry. (Doesn't
2061 	 * actually shadow anything - we copy the pages directly.)
2062 	 */
2063 	vm_map_entry_allocate_object(dst_entry);
2064 	dst_object = dst_entry->object.vm_object;
2065 
2066 	prot = dst_entry->max_protection;
2067 
2068 	/*
2069 	 * Loop through all of the pages in the entry's range, copying each
2070 	 * one from the source object (it should be there) to the destination
2071 	 * object.
2072 	 */
2073 	vm_object_hold(src_object);
2074 	vm_object_hold(dst_object);
2075 	for (vaddr = dst_entry->start, dst_offset = 0;
2076 	    vaddr < dst_entry->end;
2077 	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
2078 
2079 		/*
2080 		 * Allocate a page in the destination object
2081 		 */
2082 		do {
2083 			dst_m = vm_page_alloc(dst_object,
2084 					      OFF_TO_IDX(dst_offset),
2085 					      VM_ALLOC_NORMAL);
2086 			if (dst_m == NULL) {
2087 				vm_wait(0);
2088 			}
2089 		} while (dst_m == NULL);
2090 
2091 		/*
2092 		 * Find the page in the source object, and copy it in.
2093 		 * (Because the source is wired down, the page will be in
2094 		 * memory.)
2095 		 */
2096 		src_m = vm_page_lookup(src_object,
2097 				       OFF_TO_IDX(dst_offset + src_offset));
2098 		if (src_m == NULL)
2099 			panic("vm_fault_copy_entry: page missing");
2100 
2101 		vm_page_copy(src_m, dst_m);
2102 		vm_page_event(src_m, VMEVENT_COW);
2103 
2104 		/*
2105 		 * Enter it in the pmap...
2106 		 */
2107 
2108 		vm_page_flag_clear(dst_m, PG_ZERO);
2109 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry);
2110 
2111 		/*
2112 		 * Mark it no longer busy, and put it on the active list.
2113 		 */
2114 		vm_page_activate(dst_m);
2115 		vm_page_wakeup(dst_m);
2116 	}
2117 	vm_object_drop(dst_object);
2118 	vm_object_drop(src_object);
2119 }
2120 
2121 #if 0
2122 
2123 /*
2124  * This routine checks around the requested page for other pages that
2125  * might be able to be faulted in.  This routine brackets the viable
2126  * pages for the pages to be paged in.
2127  *
2128  * Inputs:
2129  *	m, rbehind, rahead
2130  *
2131  * Outputs:
2132  *  marray (array of vm_page_t), reqpage (index of requested page)
2133  *
2134  * Return value:
2135  *  number of pages in marray
2136  */
2137 static int
2138 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
2139 			  vm_page_t *marray, int *reqpage)
2140 {
2141 	int i,j;
2142 	vm_object_t object;
2143 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
2144 	vm_page_t rtm;
2145 	int cbehind, cahead;
2146 
2147 	object = m->object;
2148 	pindex = m->pindex;
2149 
2150 	/*
2151 	 * we don't fault-ahead for device pager
2152 	 */
2153 	if ((object->type == OBJT_DEVICE) ||
2154 	    (object->type == OBJT_MGTDEVICE)) {
2155 		*reqpage = 0;
2156 		marray[0] = m;
2157 		return 1;
2158 	}
2159 
2160 	/*
2161 	 * if the requested page is not available, then give up now
2162 	 */
2163 	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
2164 		*reqpage = 0;	/* not used by caller, fix compiler warn */
2165 		return 0;
2166 	}
2167 
2168 	if ((cbehind == 0) && (cahead == 0)) {
2169 		*reqpage = 0;
2170 		marray[0] = m;
2171 		return 1;
2172 	}
2173 
2174 	if (rahead > cahead) {
2175 		rahead = cahead;
2176 	}
2177 
2178 	if (rbehind > cbehind) {
2179 		rbehind = cbehind;
2180 	}
2181 
2182 	/*
2183 	 * Do not do any readahead if we have insufficient free memory.
2184 	 *
2185 	 * XXX code was broken disabled before and has instability
2186 	 * XXX this code was broken and disabled before, and exhibits
2187 	 * instability with the conditional fixed, so shortcut for now.
2188 	if (burst_fault == 0 || vm_page_count_severe()) {
2189 		marray[0] = m;
2190 		*reqpage = 0;
2191 		return 1;
2192 	}
2193 
2194 	/*
2195 	 * scan backward for the read behind pages -- in memory
2196 	 *
2197 	 * Assume that if the page is not found an interrupt will not
2198 	 * create it.  Theoretically interrupts can only remove (busy)
2199 	 * pages, not create new associations.
2200 	 */
2201 	if (pindex > 0) {
2202 		if (rbehind > pindex) {
2203 			rbehind = pindex;
2204 			startpindex = 0;
2205 		} else {
2206 			startpindex = pindex - rbehind;
2207 		}
2208 
2209 		vm_object_hold(object);
2210 		for (tpindex = pindex; tpindex > startpindex; --tpindex) {
2211 			if (vm_page_lookup(object, tpindex - 1))
2212 				break;
2213 		}
2214 
2215 		i = 0;
2216 		while (tpindex < pindex) {
2217 			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2218 							     VM_ALLOC_NULL_OK);
2219 			if (rtm == NULL) {
2220 				for (j = 0; j < i; j++) {
2221 					vm_page_free(marray[j]);
2222 				}
2223 				vm_object_drop(object);
2224 				marray[0] = m;
2225 				*reqpage = 0;
2226 				return 1;
2227 			}
2228 			marray[i] = rtm;
2229 			++i;
2230 			++tpindex;
2231 		}
2232 		vm_object_drop(object);
2233 	} else {
2234 		i = 0;
2235 	}
2236 
2237 	/*
2238 	 * Assign requested page
2239 	 */
2240 	marray[i] = m;
2241 	*reqpage = i;
2242 	++i;
2243 
2244 	/*
2245 	 * Scan forwards for read-ahead pages
2246 	 */
2247 	tpindex = pindex + 1;
2248 	endpindex = tpindex + rahead;
2249 	if (endpindex > object->size)
2250 		endpindex = object->size;
2251 
2252 	vm_object_hold(object);
2253 	while (tpindex < endpindex) {
2254 		if (vm_page_lookup(object, tpindex))
2255 			break;
2256 		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2257 						     VM_ALLOC_NULL_OK);
2258 		if (rtm == NULL)
2259 			break;
2260 		marray[i] = rtm;
2261 		++i;
2262 		++tpindex;
2263 	}
2264 	vm_object_drop(object);
2265 
2266 	return (i);
2267 }
2268 
2269 #endif
2270 
2271 /*
2272  * vm_prefault() provides a quick way of clustering pagefaults into a
2273  * process's address space.  It is a "cousin" of pmap_object_init_pt,
2274  * except it runs at page fault time instead of mmap time.
2275  *
2276  * vm.fast_fault	Enables pre-faulting zero-fill pages
2277  *
2278  * vm.prefault_pages	Number of pages (1/2 negative, 1/2 positive) to
2279  *			prefault.  Scan stops in either direction when
2280  *			a page is found to already exist.
2281  *
2282  * This code used to be per-platform pmap_prefault().  It is now
2283  * machine-independent and enhanced to also pre-fault zero-fill pages
2284  * (see vm.fast_fault) as well as make them writable, which greatly
2285  * reduces the number of page faults programs incur.
2286  *
2287  * Application performance when pre-faulting zero-fill pages is heavily
2288  * dependent on the application.  Very tiny applications like /bin/echo
2289  * lose a little performance while applications of any appreciable size
2290  * gain performance.  Prefaulting multiple pages also reduces SMP
2291  * congestion and can improve SMP performance significantly.
2292  *
2293  * NOTE!  prot may allow writing but this only applies to the top level
2294  *	  object.  If we wind up mapping a page extracted from a backing
2295  *	  object we have to make sure it is read-only.
2296  *
2297  * NOTE!  The caller has already handled any COW operations on the
2298  *	  vm_map_entry via the normal fault code.  Do NOT call this
2299  *	  shortcut unless the normal fault code has run on this entry.
2300  *
2301  * The related map must be locked.
2302  * No other requirements.
2303  */
2304 static int vm_prefault_pages = 8;
2305 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0,
2306 	   "Maximum number of pages to pre-fault");
2307 static int vm_fast_fault = 1;
2308 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
2309 	   "Burst fault zero-fill regions");
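
#if 0
/*
 * Editor's note (not compiled): both knobs are CTLFLAG_RW and therefore
 * run-time tunable.  A userland sketch of inspecting and adjusting
 * vm.prefault_pages with sysctl(3):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int pages;
	size_t len = sizeof(pages);

	if (sysctlbyname("vm.prefault_pages", &pages, &len, NULL, 0) == 0)
		printf("vm.prefault_pages = %d\n", pages);

	pages = 16;	/* hypothetical new value, needs privilege */
	if (sysctlbyname("vm.prefault_pages", NULL, NULL,
			 &pages, sizeof(pages)) != 0)
		perror("sysctlbyname");
	return (0);
}
#endif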
2310 
2311 /*
2312  * Set PG_NOSYNC if the map entry indicates so, but only if the page
2313  * is not already dirty by other means.  This will prevent passive
2314  * filesystem syncing as well as 'sync' from writing out the page.
2315  */
2316 static void
2317 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
2318 {
2319 	if (entry->eflags & MAP_ENTRY_NOSYNC) {
2320 		if (m->dirty == 0)
2321 			vm_page_flag_set(m, PG_NOSYNC);
2322 	} else {
2323 		vm_page_flag_clear(m, PG_NOSYNC);
2324 	}
2325 }
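
#if 0
/*
 * Editor's note (not compiled): MAP_ENTRY_NOSYNC corresponds to mmap(2)'s
 * MAP_NOSYNC flag.  A userland sketch of requesting it:
 */
#include <sys/mman.h>

static void *
map_nosync_example(int fd, size_t len)
{
	/*
	 * Dirty pages of this mapping are skipped by passive filesystem
	 * syncing; they are flushed by msync() or at unmap time instead.
	 */
	return (mmap(NULL, len, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_NOSYNC, fd, 0));
}
#endif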
2326 
2327 static void
2328 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot,
2329 	    int fault_flags)
2330 {
2331 	struct lwp *lp;
2332 	vm_page_t m;
2333 	vm_offset_t addr;
2334 	vm_pindex_t index;
2335 	vm_pindex_t pindex;
2336 	vm_object_t object;
2337 	int pprot;
2338 	int i;
2339 	int noneg;
2340 	int nopos;
2341 	int maxpages;
2342 
2343 	/*
2344 	 * Get stable max count value, disabled if set to 0
2345 	 */
2346 	maxpages = vm_prefault_pages;
2347 	cpu_ccfence();
2348 	if (maxpages <= 0)
2349 		return;
2350 
2351 	/*
2352 	 * We do not currently prefault mappings that use virtual page
2353 	 * tables.  We do not prefault foreign pmaps.
2354 	 */
2355 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2356 		return;
2357 	lp = curthread->td_lwp;
2358 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2359 		return;
2360 
2361 	/*
2362 	 * Limit pre-fault count to 1024 pages.
2363 	 */
2364 	if (maxpages > 1024)
2365 		maxpages = 1024;
2366 
2367 	object = entry->object.vm_object;
2368 	KKASSERT(object != NULL);
2369 	KKASSERT(object == entry->object.vm_object);
2370 	vm_object_hold(object);
2371 	vm_object_chain_acquire(object);
2372 
2373 	noneg = 0;
2374 	nopos = 0;
2375 	for (i = 0; i < maxpages; ++i) {
2376 		vm_object_t lobject;
2377 		vm_object_t nobject;
2378 		int allocated = 0;
2379 		int error;
2380 
2381 		/*
2382 		 * This can eat a lot of time on a heavily contended
2383 		 * machine so yield on the tick if needed.
2384 		 */
2385 		if ((i & 7) == 7)
2386 			lwkt_yield();
2387 
2388 		/*
2389 		 * Calculate the page to pre-fault, stopping the scan in
2390 		 * each direction separately if the limit is reached.
2391 		 */
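		/*
		 * (Editor's note)  Concretely, for faulting address A and
		 * page size P the scan visits A+P, A-P, A+2P, A-2P, ...;
		 * even i probes forward, odd i probes backward.
		 */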
2392 		if (i & 1) {
2393 			if (noneg)
2394 				continue;
2395 			addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2396 		} else {
2397 			if (nopos)
2398 				continue;
2399 			addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2400 		}
2401 		if (addr < entry->start) {
2402 			noneg = 1;
2403 			if (noneg && nopos)
2404 				break;
2405 			continue;
2406 		}
2407 		if (addr >= entry->end) {
2408 			nopos = 1;
2409 			if (noneg && nopos)
2410 				break;
2411 			continue;
2412 		}
2413 
2414 		/*
2415 		 * Skip pages already mapped, and stop scanning in that
2416 		 * direction.  When the scan terminates in both directions
2417 		 * we are done.
2418 		 */
2419 		if (pmap_prefault_ok(pmap, addr) == 0) {
2420 			if (i & 1)
2421 				noneg = 1;
2422 			else
2423 				nopos = 1;
2424 			if (noneg && nopos)
2425 				break;
2426 			continue;
2427 		}
2428 
2429 		/*
2430 		 * Follow the VM object chain to obtain the page to be mapped
2431 		 * into the pmap.
2432 		 *
2433 		 * If we reach the terminal object without finding a page
2434 		 * and we determine it would be advantageous, then allocate
2435 		 * a zero-fill page for the base object.  The base object
2436 		 * is guaranteed to be OBJT_DEFAULT for this case.
2437 		 *
2438 		 * In order to not have to check the pager via *haspage*()
2439 		 * we stop if any non-default object is encountered, e.g.
2440 		 * a vnode or swap object would stop the loop.
2441 		 */
2442 		index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2443 		lobject = object;
2444 		pindex = index;
2445 		pprot = prot;
2446 
2447 		KKASSERT(lobject == entry->object.vm_object);
2448 		/*vm_object_hold(lobject); implied */
2449 
2450 		while ((m = vm_page_lookup_busy_try(lobject, pindex,
2451 						    TRUE, &error)) == NULL) {
2452 			if (lobject->type != OBJT_DEFAULT)
2453 				break;
2454 			if (lobject->backing_object == NULL) {
2455 				if (vm_fast_fault == 0)
2456 					break;
2457 				if ((prot & VM_PROT_WRITE) == 0 ||
2458 				    vm_page_count_min(0)) {
2459 					break;
2460 				}
2461 
2462 				/*
2463 				 * NOTE: Allocated from base object
2464 				 */
2465 				m = vm_page_alloc(object, index,
2466 						  VM_ALLOC_NORMAL |
2467 						  VM_ALLOC_ZERO |
2468 						  VM_ALLOC_USE_GD |
2469 						  VM_ALLOC_NULL_OK);
2470 				if (m == NULL)
2471 					break;
2472 				allocated = 1;
2473 				pprot = prot;
2474 				/* lobject = object .. not needed */
2475 				break;
2476 			}
2477 			if (lobject->backing_object_offset & PAGE_MASK)
2478 				break;
2479 			nobject = lobject->backing_object;
2480 			vm_object_hold(nobject);
2481 			KKASSERT(nobject == lobject->backing_object);
2482 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
2483 			if (lobject != object) {
2484 				vm_object_lock_swap();
2485 				vm_object_drop(lobject);
2486 			}
2487 			lobject = nobject;
2488 			pprot &= ~VM_PROT_WRITE;
2489 			vm_object_chain_acquire(lobject);
2490 		}
2491 
2492 		/*
2493 		 * NOTE: A non-NULL (m) will be associated with lobject if
2494 		 *	 it was found there, otherwise it is probably a
2495 		 *	 zero-fill page associated with the base object.
2496 		 *
2497 		 * Give-up if no page is available.
2498 		 */
2499 		if (m == NULL) {
2500 			if (lobject != object) {
2501 				if (object->backing_object != lobject)
2502 					vm_object_hold(object->backing_object);
2503 				vm_object_chain_release_all(
2504 					object->backing_object, lobject);
2505 				if (object->backing_object != lobject)
2506 					vm_object_drop(object->backing_object);
2507 				vm_object_drop(lobject);
2508 			}
2509 			break;
2510 		}
2511 
2512 		/*
2513 		 * The object must be marked dirty if we are mapping a
2514 		 * writable page.  m->object is either lobject or object,
2515 		 * both of which are still held.  Do this before we
2516 		 * potentially drop the object.
2517 		 */
2518 		if (pprot & VM_PROT_WRITE)
2519 			vm_object_set_writeable_dirty(m->object);
2520 
2521 		/*
2522 		 * Do not conditionalize on PG_RAM.  If pages are present in
2523 		 * the VM system we assume optimal caching.  If caching is
2524 		 * not optimal the I/O gravy train will be restarted when we
2525 		 * hit an unavailable page.  We do not want to try to restart
2526 		 * the gravy train now because we really don't know how much
2527 		 * of the object has been cached.  The cost for restarting
2528 		 * the gravy train should be low (since accesses will likely
2529 		 * be I/O bound anyway).
2530 		 */
2531 		if (lobject != object) {
2532 			if (object->backing_object != lobject)
2533 				vm_object_hold(object->backing_object);
2534 			vm_object_chain_release_all(object->backing_object,
2535 						    lobject);
2536 			if (object->backing_object != lobject)
2537 				vm_object_drop(object->backing_object);
2538 			vm_object_drop(lobject);
2539 		}
2540 
2541 		/*
2542 		 * Enter the page into the pmap if appropriate.  If we had
2543 		 * allocated the page we have to place it on a queue.  If not
2544 		 * we just have to make sure it isn't on the cache queue
2545 		 * (pages on the cache queue are not allowed to be mapped).
2546 		 */
2547 		if (allocated) {
2548 			/*
2549 			 * Page must be zerod.
2550 			 * Page must be zeroed.
2551 			if ((m->flags & PG_ZERO) == 0) {
2552 				vm_page_zero_fill(m);
2553 			} else {
2554 #ifdef PMAP_DEBUG
2555 				pmap_page_assertzero(
2556 						VM_PAGE_TO_PHYS(m));
2557 #endif
2558 				vm_page_flag_clear(m, PG_ZERO);
2559 				mycpu->gd_cnt.v_ozfod++;
2560 			}
2561 			mycpu->gd_cnt.v_zfod++;
2562 			m->valid = VM_PAGE_BITS_ALL;
2563 
2564 			/*
2565 			 * Handle dirty page case
2566 			 */
2567 			if (pprot & VM_PROT_WRITE)
2568 				vm_set_nosync(m, entry);
2569 			pmap_enter(pmap, addr, m, pprot, 0, entry);
2570 			mycpu->gd_cnt.v_vm_faults++;
2571 			if (curthread->td_lwp)
2572 				++curthread->td_lwp->lwp_ru.ru_minflt;
2573 			vm_page_deactivate(m);
2574 			if (pprot & VM_PROT_WRITE) {
2575 				/*vm_object_set_writeable_dirty(m->object);*/
2576 				vm_set_nosync(m, entry);
2577 				if (fault_flags & VM_FAULT_DIRTY) {
2578 					vm_page_dirty(m);
2579 					/*XXX*/
2580 					swap_pager_unswapped(m);
2581 				}
2582 			}
2583 			vm_page_wakeup(m);
2584 		} else if (error) {
2585 			/* couldn't busy page, no wakeup */
2586 		} else if (
2587 		    ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2588 		    (m->flags & PG_FICTITIOUS) == 0) {
2589 			/*
2590 			 * A fully valid page not undergoing soft I/O can
2591 			 * be immediately entered into the pmap.
2592 			 */
2593 			if ((m->queue - m->pc) == PQ_CACHE)
2594 				vm_page_deactivate(m);
2595 			if (pprot & VM_PROT_WRITE) {
2596 				/*vm_object_set_writeable_dirty(m->object);*/
2597 				vm_set_nosync(m, entry);
2598 				if (fault_flags & VM_FAULT_DIRTY) {
2599 					vm_page_dirty(m);
2600 					/*XXX*/
2601 					swap_pager_unswapped(m);
2602 				}
2603 			}
2606 			pmap_enter(pmap, addr, m, pprot, 0, entry);
2607 			mycpu->gd_cnt.v_vm_faults++;
2608 			if (curthread->td_lwp)
2609 				++curthread->td_lwp->lwp_ru.ru_minflt;
2610 			vm_page_wakeup(m);
2611 		} else {
2612 			vm_page_wakeup(m);
2613 		}
2614 	}
2615 	vm_object_chain_release(object);
2616 	vm_object_drop(object);
2617 }
2618 
2619 static void
2620 vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
2621 		  vm_map_entry_t entry, int prot, int fault_flags)
2622 {
2623 	struct lwp *lp;
2624 	vm_page_t m;
2625 	vm_offset_t addr;
2626 	vm_pindex_t pindex;
2627 	vm_object_t object;
2628 	int i;
2629 	int noneg;
2630 	int nopos;
2631 	int maxpages;
2632 
2633 	/*
2634 	 * Get stable max count value, disabled if set to 0
2635 	 */
2636 	maxpages = vm_prefault_pages;
2637 	cpu_ccfence();
2638 	if (maxpages <= 0)
2639 		return;
2640 
2641 	/*
2642 	 * We do not currently prefault mappings that use virtual page
2643 	 * tables.  We do not prefault foreign pmaps.
2644 	 */
2645 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2646 		return;
2647 	lp = curthread->td_lwp;
2648 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2649 		return;
2650 
2651 	/*
2652 	 * Limit pre-fault count to 1024 pages.
2653 	 */
2654 	if (maxpages > 1024)
2655 		maxpages = 1024;
2656 
2657 	object = entry->object.vm_object;
2658 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2659 	KKASSERT(object->backing_object == NULL);
2660 
2661 	noneg = 0;
2662 	nopos = 0;
2663 	for (i = 0; i < maxpages; ++i) {
2664 		int error;
2665 
2666 		/*
2667 		 * Calculate the page to pre-fault, stopping the scan in
2668 		 * each direction separately if the limit is reached.
2669 		 */
2670 		if (i & 1) {
2671 			if (noneg)
2672 				continue;
2673 			addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2674 		} else {
2675 			if (nopos)
2676 				continue;
2677 			addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2678 		}
2679 		if (addr < entry->start) {
2680 			noneg = 1;
2681 			if (noneg && nopos)
2682 				break;
2683 			continue;
2684 		}
2685 		if (addr >= entry->end) {
2686 			nopos = 1;
2687 			if (noneg && nopos)
2688 				break;
2689 			continue;
2690 		}
2691 
2692 		/*
2693 		 * Skip pages already mapped, and stop scanning in that
2694 		 * direction.  When the scan terminates in both directions
2695 		 * we are done.
2696 		 */
2697 		if (pmap_prefault_ok(pmap, addr) == 0) {
2698 			if (i & 1)
2699 				noneg = 1;
2700 			else
2701 				nopos = 1;
2702 			if (noneg && nopos)
2703 				break;
2704 			continue;
2705 		}
2706 
2707 		/*
2708 		 * Follow the VM object chain to obtain the page to be mapped
2709 		 * into the pmap.  This version of the prefault code only
2710 		 * works with terminal objects.
2711 		 *
2712 		 * WARNING!  We cannot call swap_pager_unswapped() with a
2713 		 *	     shared token.
2714 		 */
2715 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2716 
2717 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2718 		if (m == NULL || error)
2719 			continue;
2720 
2721 		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2722 		    (m->flags & PG_FICTITIOUS) == 0 &&
2723 		    ((m->flags & PG_SWAPPED) == 0 ||
2724 		     (prot & VM_PROT_WRITE) == 0 ||
2725 		     (fault_flags & VM_FAULT_DIRTY) == 0)) {
2726 			/*
2727 			 * A fully valid page not undergoing soft I/O can
2728 			 * be immediately entered into the pmap.
2729 			 */
2730 			if ((m->queue - m->pc) == PQ_CACHE)
2731 				vm_page_deactivate(m);
2732 			if (prot & VM_PROT_WRITE) {
2733 				vm_object_set_writeable_dirty(m->object);
2734 				vm_set_nosync(m, entry);
2735 				if (fault_flags & VM_FAULT_DIRTY) {
2736 					vm_page_dirty(m);
2737 					/*XXX*/
2738 					swap_pager_unswapped(m);
2739 				}
2740 			}
2741 			pmap_enter(pmap, addr, m, prot, 0, entry);
2742 			mycpu->gd_cnt.v_vm_faults++;
2743 			if (curthread->td_lwp)
2744 				++curthread->td_lwp->lwp_ru.ru_minflt;
2745 		}
2746 		vm_page_wakeup(m);
2747 	}
2748 }
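
/*
 * Editor's note: vm_prefault_quick() above differs from vm_prefault() in
 * that it handles only terminal objects (no backing-chain traversal), may
 * be entered with only a shared object token held, and never allocates
 * zero-fill pages.
 */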
2749