xref: /csrg-svn/sys/vm/vm_fault.c (revision 47663)
145748Smckusick /*
2*47663Smckusick  * Copyright (c) 1991 Regents of the University of California.
345748Smckusick  * All rights reserved.
445748Smckusick  *
5*47663Smckusick  * This code is derived from software contributed to Berkeley by
6*47663Smckusick  * The Mach Operating System project at Carnegie-Mellon University.
7*47663Smckusick  *
8*47663Smckusick  * %sccs.include.redist.c%
9*47663Smckusick  *
10*47663Smckusick  *	@(#)vm_fault.c	7.4 (Berkeley) 03/27/91
11*47663Smckusick  *
12*47663Smckusick  *
13*47663Smckusick  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14*47663Smckusick  * All rights reserved.
15*47663Smckusick  *
1647592Smckusick  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
1747592Smckusick  *
1847592Smckusick  * Permission to use, copy, modify and distribute this software and
1947592Smckusick  * its documentation is hereby granted, provided that both the copyright
2047592Smckusick  * notice and this permission notice appear in all copies of the
2147592Smckusick  * software, derivative works or modified versions, and any portions
2247592Smckusick  * thereof, and that both notices appear in supporting documentation.
2347592Smckusick  *
2447592Smckusick  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
2547592Smckusick  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
2647592Smckusick  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
2747592Smckusick  *
28*47663Smckusick  * Carnegie Mellon requests users of this software to return to
2945748Smckusick  *
30*47663Smckusick  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31*47663Smckusick  *  School of Computer Science
32*47663Smckusick  *  Carnegie Mellon University
33*47663Smckusick  *  Pittsburgh PA 15213-3890
3447662Smckusick  *
35*47663Smckusick  * any improvements or extensions that they make and grant Carnegie the
36*47663Smckusick  * rights to redistribute these changes.
3745748Smckusick  */
3845748Smckusick 
3945748Smckusick /*
4045748Smckusick  *	Page fault handling module.
4145748Smckusick  */
4245748Smckusick 
4345748Smckusick #include "param.h"
4445748Smckusick #include "../vm/vm_param.h"
4545748Smckusick #include "../vm/vm_map.h"
4645748Smckusick #include "../vm/vm_object.h"
4745748Smckusick #include "../vm/vm_page.h"
4845748Smckusick #include "../vm/pmap.h"
4945748Smckusick #include "../vm/vm_statistics.h"
5045748Smckusick #include "../vm/vm_pageout.h"
5145748Smckusick 
5245748Smckusick /*
5345748Smckusick  *	vm_fault:
5445748Smckusick  *
5545748Smckusick  *	Handle a page fault occurring at the given address,
5645748Smckusick  *	requiring the given permissions, in the map specified.
5745748Smckusick  *	If successful, the page is inserted into the
5845748Smckusick  *	associated physical map.
5945748Smckusick  *
6045748Smckusick  *	NOTE: the given address should be truncated to the
6145748Smckusick  *	proper page address.
6245748Smckusick  *
6345748Smckusick  *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
6445748Smckusick  *	a standard error specifying why the fault is fatal is returned.
6545748Smckusick  *
6645748Smckusick  *
6745748Smckusick  *	The map in question must be referenced, and remains so.
6845748Smckusick  *	Caller may hold no locks.
6945748Smckusick  */
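/*
 *	Illustrative sketch (not part of the original source): a machine-
 *	dependent trap handler typically invokes this routine roughly as
 *
 *		rv = vm_fault(map, trunc_page(va), ftype, FALSE);
 *		if (rv != KERN_SUCCESS)
 *			... post a signal or panic ...
 *
 *	where ftype is VM_PROT_READ or VM_PROT_WRITE as appropriate;
 *	exact caller names and error handling vary by machine.
 */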
7045748Smckusick vm_fault(map, vaddr, fault_type, change_wiring)
7145748Smckusick 	vm_map_t	map;
7245748Smckusick 	vm_offset_t	vaddr;
7345748Smckusick 	vm_prot_t	fault_type;
7445748Smckusick 	boolean_t	change_wiring;
7545748Smckusick {
7645748Smckusick 	vm_object_t		first_object;
7745748Smckusick 	vm_offset_t		first_offset;
7845748Smckusick 	vm_map_entry_t		entry;
7945748Smckusick 	register vm_object_t	object;
8045748Smckusick 	register vm_offset_t	offset;
8145748Smckusick 	register vm_page_t	m;
8245748Smckusick 	vm_page_t		first_m;
8345748Smckusick 	vm_prot_t		prot;
8445748Smckusick 	int			result;
8545748Smckusick 	boolean_t		wired;		/* map entry is wired down */
8645748Smckusick 	boolean_t		su;		/* "single use": object not shared */
8745748Smckusick 	boolean_t		lookup_still_valid; /* vm_map_lookup lock still held */
8845748Smckusick 	boolean_t		page_exists;
8945748Smckusick 	vm_page_t		old_m;
9045748Smckusick 	vm_object_t		next_object;
9145748Smckusick 
9245748Smckusick 	vm_stat.faults++;		/* needs lock XXX */
9345748Smckusick /*
9445748Smckusick  *	Recovery actions
9545748Smckusick  */
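/* FREE_PAGE: wake anyone sleeping on the busy page, then free the page. */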
9645748Smckusick #define	FREE_PAGE(m)	{				\
9745748Smckusick 	PAGE_WAKEUP(m);					\
9845748Smckusick 	vm_page_lock_queues();				\
9945748Smckusick 	vm_page_free(m);				\
10045748Smckusick 	vm_page_unlock_queues();			\
10145748Smckusick }
10245748Smckusick 
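/* RELEASE_PAGE: wake sleepers and put the page back on the active queue. */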
10345748Smckusick #define	RELEASE_PAGE(m)	{				\
10445748Smckusick 	PAGE_WAKEUP(m);					\
10545748Smckusick 	vm_page_lock_queues();				\
10645748Smckusick 	vm_page_activate(m);				\
10745748Smckusick 	vm_page_unlock_queues();			\
10845748Smckusick }
10945748Smckusick 
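/* UNLOCK_MAP: release the map lookup lock if we still hold it. */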
11045748Smckusick #define	UNLOCK_MAP	{				\
11145748Smckusick 	if (lookup_still_valid) {			\
11245748Smckusick 		vm_map_lookup_done(map, entry);		\
11345748Smckusick 		lookup_still_valid = FALSE;		\
11445748Smckusick 	}						\
11545748Smckusick }
11645748Smckusick 
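/*
 *	UNLOCK_THINGS: end paging activity on the current object, free the
 *	placeholder page in the top-level object if we have moved past it,
 *	and release the map lookup lock.
 */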
11745748Smckusick #define	UNLOCK_THINGS	{				\
11845748Smckusick 	object->paging_in_progress--;			\
11945748Smckusick 	vm_object_unlock(object);			\
12045748Smckusick 	if (object != first_object) {			\
12145748Smckusick 		vm_object_lock(first_object);		\
12245748Smckusick 		FREE_PAGE(first_m);			\
12345748Smckusick 		first_object->paging_in_progress--;	\
12445748Smckusick 		vm_object_unlock(first_object);		\
12545748Smckusick 	}						\
12645748Smckusick 	UNLOCK_MAP;					\
12745748Smckusick }
12845748Smckusick 
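/* UNLOCK_AND_DEALLOCATE: UNLOCK_THINGS, then drop our reference to first_object. */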
12945748Smckusick #define	UNLOCK_AND_DEALLOCATE	{			\
13045748Smckusick 	UNLOCK_THINGS;					\
13145748Smckusick 	vm_object_deallocate(first_object);		\
13245748Smckusick }
13345748Smckusick 
13445748Smckusick     RetryFault: ;
13545748Smckusick 
13645748Smckusick 	/*
13745748Smckusick 	 *	Find the backing store object and offset into
13845748Smckusick 	 *	it to begin the search.
13945748Smckusick 	 */
14045748Smckusick 
14145748Smckusick 	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
14245748Smckusick 			&first_object, &first_offset,
14345748Smckusick 			&prot, &wired, &su)) != KERN_SUCCESS) {
14445748Smckusick 		return(result);
14545748Smckusick 	}
14645748Smckusick 	lookup_still_valid = TRUE;
14745748Smckusick 
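	/*
	 *	For a wired entry, fault the page in with the entry's full
	 *	protection so that a later access of any permitted type
	 *	cannot fault again on a wired-down page.
	 */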
14845748Smckusick 	if (wired)
14945748Smckusick 		fault_type = prot;
15045748Smckusick 
15145748Smckusick 	first_m = VM_PAGE_NULL;
15245748Smckusick 
15345748Smckusick    	/*
15445748Smckusick 	 *	Make a reference to this object to
15545748Smckusick 	 *	prevent its disposal while we are messing with
15645748Smckusick 	 *	it.  Once we have the reference, the map is free
15745748Smckusick 	 *	to be diddled.  Since objects reference their
15845748Smckusick 	 *	shadows (and copies), they will stay around as well.
15945748Smckusick 	 */
16045748Smckusick 
16145748Smckusick 	vm_object_lock(first_object);
16245748Smckusick 
16345748Smckusick 	first_object->ref_count++;
16445748Smckusick 	first_object->paging_in_progress++;
16545748Smckusick 
16645748Smckusick 	/*
16745748Smckusick 	 *	INVARIANTS (through entire routine):
16845748Smckusick 	 *
16945748Smckusick 	 *	1)	At all times, we must either have the object
17045748Smckusick 	 *		lock or a busy page in some object to prevent
17145748Smckusick 	 *		some other thread from trying to bring in
17245748Smckusick 	 *		the same page.
17345748Smckusick 	 *
17445748Smckusick 	 *		Note that we cannot hold any locks during the
17545748Smckusick 	 *		pager access or when waiting for memory, so
17645748Smckusick 	 *		we use a busy page then.
17745748Smckusick 	 *
17845748Smckusick 	 *		Note also that we aren't as concerned about
17945748Smckusick 	 *		more than one thread attempting to pager_data_unlock
18045748Smckusick 	 *		the same page at once, so we don't hold the page
18145748Smckusick 	 *		as busy then, but do record the highest unlock
18245748Smckusick 	 *		value so far.  [Unlock requests may also be delivered
18345748Smckusick 	 *		out of order.]
18445748Smckusick 	 *
18545748Smckusick 	 *	2)	Once we have a busy page, we must remove it from
18645748Smckusick 	 *		the pageout queues, so that the pageout daemon
18745748Smckusick 	 *		will not grab it away.
18845748Smckusick 	 *
18945748Smckusick 	 *	3)	To prevent another thread from racing us down the
19045748Smckusick 	 *		shadow chain and entering a new page in the top
19145748Smckusick 	 *		object before we do, we must keep a busy page in
19245748Smckusick 	 *		the top object while following the shadow chain.
19345748Smckusick 	 *
19445748Smckusick 	 *	4)	We must increment paging_in_progress on any object
19545748Smckusick 	 *		for which we have a busy page, to prevent
19645748Smckusick 	 *		vm_object_collapse from removing the busy page
19745748Smckusick 	 *		without our noticing.
19845748Smckusick 	 */
19945748Smckusick 
20045748Smckusick 	/*
20145748Smckusick 	 *	Search for the page at object/offset.
20245748Smckusick 	 */
20345748Smckusick 
20445748Smckusick 	object = first_object;
20545748Smckusick 	offset = first_offset;
20645748Smckusick 
20745748Smckusick 	/*
20845748Smckusick 	 *	See whether this page is resident
20945748Smckusick 	 */
21045748Smckusick 
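	/*
	 *	The loop below walks the shadow chain starting at
	 *	first_object: look for a resident page, then ask the
	 *	pager; failing both, move on to the shadow object, and
	 *	zero-fill in the top-level object if the chain runs out.
	 */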
21145748Smckusick 	while (TRUE) {
21245748Smckusick 		m = vm_page_lookup(object, offset);
21345748Smckusick 		if (m != VM_PAGE_NULL) {
21445748Smckusick 			/*
21545748Smckusick 			 *	If the page is being brought in,
21645748Smckusick 			 *	wait for it and then retry.
21745748Smckusick 			 */
21845748Smckusick 			if (m->busy) {
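				/*
				 *	Under DOTHREADS, check whether the
				 *	sleep was interrupted; if so, just
				 *	return and let the fault be retaken.
				 */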
21945748Smckusick #ifdef DOTHREADS
22045748Smckusick 				int	wait_result;
22145748Smckusick 
22245748Smckusick 				PAGE_ASSERT_WAIT(m, !change_wiring);
22345748Smckusick 				UNLOCK_THINGS;
22445748Smckusick 				thread_block();
22545748Smckusick 				wait_result = current_thread()->wait_result;
22645748Smckusick 				vm_object_deallocate(first_object);
22745748Smckusick 				if (wait_result != THREAD_AWAKENED)
22845748Smckusick 					return(KERN_SUCCESS);
22945748Smckusick 				goto RetryFault;
23045748Smckusick #else
23145748Smckusick 				PAGE_ASSERT_WAIT(m, !change_wiring);
23245748Smckusick 				UNLOCK_THINGS;
23345748Smckusick 				thread_block();
23445748Smckusick 				vm_object_deallocate(first_object);
23545748Smckusick 				goto RetryFault;
23645748Smckusick #endif
23745748Smckusick 			}
23845748Smckusick 
23945748Smckusick 			if (m->absent)
24045748Smckusick 				panic("vm_fault: absent");
24145748Smckusick 
24245748Smckusick 			/*
24345748Smckusick 			 *	If the desired access to this page has
24445748Smckusick 			 *	been locked out, request that it be unlocked.
24545748Smckusick 			 */
24645748Smckusick 
24745748Smckusick 			if (fault_type & m->page_lock) {
24845748Smckusick #ifdef DOTHREADS
24945748Smckusick 				int	wait_result;
25045748Smckusick 
25145748Smckusick 				if ((fault_type & m->unlock_request) != fault_type)
25245748Smckusick 					panic("vm_fault: pager_data_unlock");
25345748Smckusick 
25445748Smckusick 				PAGE_ASSERT_WAIT(m, !change_wiring);
25545748Smckusick 				UNLOCK_THINGS;
25645748Smckusick 				thread_block();
25745748Smckusick 				wait_result = current_thread()->wait_result;
25845748Smckusick 				vm_object_deallocate(first_object);
25945748Smckusick 				if (wait_result != THREAD_AWAKENED)
26045748Smckusick 					return(KERN_SUCCESS);
26145748Smckusick 				goto RetryFault;
26245748Smckusick #else
26345748Smckusick 				if ((fault_type & m->unlock_request) != fault_type)
26445748Smckusick 					panic("vm_fault: pager_data_unlock");
26545748Smckusick 
26645748Smckusick 				PAGE_ASSERT_WAIT(m, !change_wiring);
26745748Smckusick 				UNLOCK_THINGS;
26845748Smckusick 				thread_block();
26945748Smckusick 				vm_object_deallocate(first_object);
27045748Smckusick 				goto RetryFault;
27145748Smckusick #endif
27245748Smckusick 			}
27345748Smckusick 
27445748Smckusick 			/*
27545748Smckusick 			 *	Remove the page from the pageout daemon's
27645748Smckusick 			 *	reach while we play with it.
27745748Smckusick 			 */
27845748Smckusick 
27945748Smckusick 			vm_page_lock_queues();
28045748Smckusick 			if (m->inactive) {
28145748Smckusick 				queue_remove(&vm_page_queue_inactive, m,
28245748Smckusick 						vm_page_t, pageq);
28345748Smckusick 				m->inactive = FALSE;
28445748Smckusick 				vm_page_inactive_count--;
28545748Smckusick 				vm_stat.reactivations++;
28645748Smckusick 			}
28745748Smckusick 
28845748Smckusick 			if (m->active) {
28945748Smckusick 				queue_remove(&vm_page_queue_active, m,
29045748Smckusick 						vm_page_t, pageq);
29145748Smckusick 				m->active = FALSE;
29245748Smckusick 				vm_page_active_count--;
29345748Smckusick 			}
29445748Smckusick 			vm_page_unlock_queues();
29545748Smckusick 
29645748Smckusick 			/*
29745748Smckusick 			 *	Mark page busy for other threads.
29845748Smckusick 			 */
29945748Smckusick 			m->busy = TRUE;
30045748Smckusick 			m->absent = FALSE;
30145748Smckusick 			break;
30245748Smckusick 		}
30345748Smckusick 
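		/*
		 *	Allocate a blank page if we will need one here:
		 *	either this object has a pager we are about to
		 *	query (and we are not merely unwiring), or this is
		 *	the top-level object, which must eventually hold
		 *	the zero-filled or copied page.
		 */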
30445748Smckusick 		if (((object->pager != vm_pager_null) &&
30545748Smckusick 				(!change_wiring || wired))
30645748Smckusick 		    || (object == first_object)) {
30745748Smckusick 
30845748Smckusick 			/*
30945748Smckusick 			 *	Allocate a new page for this object/offset
31045748Smckusick 			 *	pair.
31145748Smckusick 			 */
31245748Smckusick 
31345748Smckusick 			m = vm_page_alloc(object, offset);
31445748Smckusick 
31545748Smckusick 			if (m == VM_PAGE_NULL) {
31645748Smckusick 				UNLOCK_AND_DEALLOCATE;
31745748Smckusick 				VM_WAIT;
31845748Smckusick 				goto RetryFault;
31945748Smckusick 			}
32045748Smckusick 		}
32145748Smckusick 
32245748Smckusick 		if ((object->pager != vm_pager_null) &&
32345748Smckusick 				(!change_wiring || wired)) {
32445748Smckusick 			int rv;
32545748Smckusick 
32645748Smckusick 			/*
32745748Smckusick 			 *	Now that we have a busy page, we can
32845748Smckusick 			 *	release the object lock.
32945748Smckusick 			 */
33045748Smckusick 			vm_object_unlock(object);
33145748Smckusick 
33245748Smckusick 			/*
33345748Smckusick 			 *	Call the pager to retrieve the data, if any,
33445748Smckusick 			 *	after releasing the lock on the map.
33545748Smckusick 			 */
33645748Smckusick 			UNLOCK_MAP;
33745748Smckusick 
33845748Smckusick 			rv = vm_pager_get(object->pager, m, TRUE);
33945748Smckusick 			if (rv == VM_PAGER_OK) {
34045748Smckusick 				/*
34145748Smckusick 				 *	Found the page.
34245748Smckusick 				 *	Leave it busy while we play with it.
34345748Smckusick 				 */
34445748Smckusick 				vm_object_lock(object);
34545748Smckusick 
34645748Smckusick 				/*
34745748Smckusick 				 *	Relookup in case pager changed page.
34845748Smckusick 				 *	Pager is responsible for disposition
34945748Smckusick 				 *	of old page if moved.
35045748Smckusick 				 */
35145748Smckusick 				m = vm_page_lookup(object, offset);
35245748Smckusick 
35345748Smckusick 				vm_stat.pageins++;
35445748Smckusick 				m->fake = FALSE;
35545748Smckusick 				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
35645748Smckusick 				break;
35745748Smckusick 			}
35845748Smckusick 
35945748Smckusick 			/*
36045748Smckusick 			 *	Remove the bogus page (which does not
36145748Smckusick 			 *	exist at this object/offset); before
36245748Smckusick 			 *	doing so, we must get back our object
36345748Smckusick 			 *	lock to preserve our invariant.
36445748Smckusick 			 *
36545748Smckusick 			 *	Also wake up any other thread that may want
36645748Smckusick 			 *	to bring in this page.
36745748Smckusick 			 *
36845748Smckusick 			 *	If this is the top-level object, we must
36945748Smckusick 			 *	leave the busy page to prevent another
37045748Smckusick 			 *	thread from rushing past us, and inserting
37145748Smckusick 			 *	the page in that object at the same time
37245748Smckusick 			 *	that we are.
37345748Smckusick 			 */
37445748Smckusick 
37545748Smckusick 			vm_object_lock(object);
37645748Smckusick 			/*
37745748Smckusick 			 * Data outside the range of the pager; an error
37845748Smckusick 			 */
37945748Smckusick 			if (rv == VM_PAGER_BAD) {
38045748Smckusick 				FREE_PAGE(m);
38145748Smckusick 				UNLOCK_AND_DEALLOCATE;
38245748Smckusick 				return(KERN_PROTECTION_FAILURE); /* XXX */
38345748Smckusick 			}
38445748Smckusick 			if (object != first_object) {
38545748Smckusick 				FREE_PAGE(m);
38645748Smckusick 				/*
38745748Smckusick 				 * XXX - we cannot just fall out at this
38845748Smckusick 				 * point, m has been freed and is invalid!
38945748Smckusick 				 */
39045748Smckusick 			}
39145748Smckusick 		}
39245748Smckusick 
39345748Smckusick 		/*
39445748Smckusick 		 * We get here if the object has no pager (or we are
39545748Smckusick 		 * unwiring) or the pager doesn't have the page.
39645748Smckusick 		 */
39745748Smckusick 		if (object == first_object)
39845748Smckusick 			first_m = m;
39945748Smckusick 
40045748Smckusick 		/*
40145748Smckusick 		 *	Move on to the next object.  Lock the next
40245748Smckusick 		 *	object before unlocking the current one.
40345748Smckusick 		 */
40445748Smckusick 
40545748Smckusick 		offset += object->shadow_offset;
40645748Smckusick 		next_object = object->shadow;
40745748Smckusick 		if (next_object == VM_OBJECT_NULL) {
40845748Smckusick 			/*
40945748Smckusick 			 *	If there's no object left, fill the page
41045748Smckusick 			 *	in the top object with zeros.
41145748Smckusick 			 */
41245748Smckusick 			if (object != first_object) {
41345748Smckusick 				object->paging_in_progress--;
41445748Smckusick 				vm_object_unlock(object);
41545748Smckusick 
41645748Smckusick 				object = first_object;
41745748Smckusick 				offset = first_offset;
41845748Smckusick 				m = first_m;
41945748Smckusick 				vm_object_lock(object);
42045748Smckusick 			}
42145748Smckusick 			first_m = VM_PAGE_NULL;
42245748Smckusick 
42345748Smckusick 			vm_page_zero_fill(m);
42445748Smckusick 			vm_stat.zero_fill_count++;
42545748Smckusick 			m->fake = FALSE;
42645748Smckusick 			m->absent = FALSE;
42745748Smckusick 			break;
42845748Smckusick 		}
42945748Smckusick 		else {
43045748Smckusick 			vm_object_lock(next_object);
43145748Smckusick 			if (object != first_object)
43245748Smckusick 				object->paging_in_progress--;
43345748Smckusick 			vm_object_unlock(object);
43445748Smckusick 			object = next_object;
43545748Smckusick 			object->paging_in_progress++;
43645748Smckusick 		}
43745748Smckusick 	}
43845748Smckusick 
43945748Smckusick 	if (m->absent || m->active || m->inactive || !m->busy)
44045748Smckusick 		panic("vm_fault: absent or active or inactive or not busy after main loop");
44145748Smckusick 
44245748Smckusick 	/*
44345748Smckusick 	 *	PAGE HAS BEEN FOUND.
44445748Smckusick 	 *	[Loop invariant still holds -- the object lock
44545748Smckusick 	 *	is held.]
44645748Smckusick 	 */
44745748Smckusick 
44845748Smckusick 	old_m = m;	/* save page that would be copied */
44945748Smckusick 
45045748Smckusick 	/*
45145748Smckusick 	 *	If the page is being written, but isn't
45245748Smckusick 	 *	already owned by the top-level object,
45345748Smckusick 	 *	we have to copy it into a new page owned
45445748Smckusick 	 *	by the top-level object.
45545748Smckusick 	 */
45645748Smckusick 
45745748Smckusick 	if (object != first_object) {
45845748Smckusick 	    	/*
45945748Smckusick 		 *	We only really need to copy if we
46045748Smckusick 		 *	want to write it.
46145748Smckusick 		 */
46245748Smckusick 
46345748Smckusick 	    	if (fault_type & VM_PROT_WRITE) {
46445748Smckusick 
46545748Smckusick 			/*
46645748Smckusick 			 *	If we try to collapse first_object at this
46745748Smckusick 			 *	point, we may deadlock when we try to get
46845748Smckusick 			 *	the lock on an intermediate object (since we
46945748Smckusick 			 *	have the bottom object locked).  We can't
47045748Smckusick 			 *	unlock the bottom object, because the page
47145748Smckusick 			 *	we found may move (by collapse) if we do.
47245748Smckusick 			 *
47345748Smckusick 			 *	Instead, we first copy the page.  Then, when
47445748Smckusick 			 *	we have no more use for the bottom object,
47545748Smckusick 			 *	we unlock it and try to collapse.
47645748Smckusick 			 *
47745748Smckusick 			 *	Note that we copy the page even if we didn't
47845748Smckusick 			 *	need to... that's the breaks.
47945748Smckusick 			 */
48045748Smckusick 
48145748Smckusick 		    	/*
48245748Smckusick 			 *	We already have an empty page in
48345748Smckusick 			 *	first_object - use it.
48445748Smckusick 			 */
48545748Smckusick 
48645748Smckusick 			vm_page_copy(m, first_m);
48745748Smckusick 			first_m->fake = FALSE;
48845748Smckusick 			first_m->absent = FALSE;
48945748Smckusick 
49045748Smckusick 			/*
49145748Smckusick 			 *	If another map is truly sharing this
49245748Smckusick 			 *	page with us, we have to flush all
49345748Smckusick 			 *	uses of the original page, since we
49445748Smckusick 			 *	can't distinguish those which want the
49545748Smckusick 			 *	original from those which need the
49645748Smckusick 			 *	new copy.
49745748Smckusick 			 */
49845748Smckusick 
49945748Smckusick 			vm_page_lock_queues();
50045748Smckusick 			if (!su) {
50145748Smckusick 				/*
50245748Smckusick 				 *	Also, once it's no longer in
50345748Smckusick 				 *	use by any maps, move it to
50445748Smckusick 				 *	the inactive queue instead.
50545748Smckusick 				 */
50645748Smckusick 
50745748Smckusick 				vm_page_deactivate(m);
50845748Smckusick 				pmap_remove_all(VM_PAGE_TO_PHYS(m));
50945748Smckusick 			}
51045748Smckusick 			else {
51145748Smckusick 				/*
51245748Smckusick 				 *	Old page is only (possibly)
51345748Smckusick 				 *	in use by faulting map.  We
51445748Smckusick 				 *	should do a pmap_remove on
51545748Smckusick 				 *	that mapping, but we know
51645748Smckusick 				 *	that pmap_enter will remove
51745748Smckusick 				 *	the old mapping before
51845748Smckusick 				 *	inserting the new one.
51945748Smckusick 				 */
52045748Smckusick 				vm_page_activate(m);
52145748Smckusick 			}
52245748Smckusick 			vm_page_unlock_queues();
52345748Smckusick 
52445748Smckusick 			/*
52545748Smckusick 			 *	We no longer need the old page or object.
52645748Smckusick 			 */
52745748Smckusick 			PAGE_WAKEUP(m);
52845748Smckusick 			object->paging_in_progress--;
52945748Smckusick 			vm_object_unlock(object);
53045748Smckusick 
53145748Smckusick 			/*
53245748Smckusick 			 *	Only use the new page below...
53345748Smckusick 			 */
53445748Smckusick 
53545748Smckusick 			vm_stat.cow_faults++;
53645748Smckusick 			m = first_m;
53745748Smckusick 			object = first_object;
53845748Smckusick 			offset = first_offset;
53945748Smckusick 
54045748Smckusick 			/*
54145748Smckusick 			 *	Now that we've gotten the copy out of the
54245748Smckusick 			 *	way, let's try to collapse the top object.
54345748Smckusick 			 */
54445748Smckusick 			vm_object_lock(object);
54545748Smckusick 			/*
54645748Smckusick 			 *	But we have to play ugly games with
54745748Smckusick 			 *	paging_in_progress to do that...
54845748Smckusick 			 */
54945748Smckusick 			object->paging_in_progress--;
55045748Smckusick 			vm_object_collapse(object);
55145748Smckusick 			object->paging_in_progress++;
55245748Smckusick 		}
55345748Smckusick 		else {
55445748Smckusick 		    	prot &= (~VM_PROT_WRITE);
55545748Smckusick 			m->copy_on_write = TRUE;
55645748Smckusick 		}
55745748Smckusick 	}
55845748Smckusick 
55945748Smckusick 	if (m->active || m->inactive)
56045748Smckusick 		panic("vm_fault: active or inactive before copy object handling");
56145748Smckusick 
56245748Smckusick 	/*
56345748Smckusick 	 *	If the page is being written, but hasn't been
56445748Smckusick 	 *	copied to the copy-object, we have to copy it there.
56545748Smckusick 	 */
56645748Smckusick     RetryCopy:
56745748Smckusick 	if (first_object->copy != VM_OBJECT_NULL) {
56845748Smckusick 		vm_object_t		copy_object = first_object->copy;
56945748Smckusick 		vm_offset_t		copy_offset;
57045748Smckusick 		vm_page_t		copy_m;
57145748Smckusick 
57245748Smckusick 		/*
57345748Smckusick 		 *	We only need to copy if we want to write it.
57445748Smckusick 		 */
57545748Smckusick 		if ((fault_type & VM_PROT_WRITE) == 0) {
57645748Smckusick 			prot &= ~VM_PROT_WRITE;
57745748Smckusick 			m->copy_on_write = TRUE;
57845748Smckusick 		}
57945748Smckusick 		else {
58045748Smckusick 			/*
58145748Smckusick 			 *	Try to get the lock on the copy_object.
58245748Smckusick 			 */
58345748Smckusick 			if (!vm_object_lock_try(copy_object)) {
58445748Smckusick 				vm_object_unlock(object);
58545748Smckusick 				/* should spin a bit here... */
58645748Smckusick 				vm_object_lock(object);
58745748Smckusick 				goto RetryCopy;
58845748Smckusick 			}
58945748Smckusick 
59045748Smckusick 			/*
59145748Smckusick 			 *	Make another reference to the copy-object,
59245748Smckusick 			 *	to keep it from disappearing during the
59345748Smckusick 			 *	copy.
59445748Smckusick 			 */
59545748Smckusick 			copy_object->ref_count++;
59645748Smckusick 
59745748Smckusick 			/*
59845748Smckusick 			 *	Does the page exist in the copy?
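			 *	(copy_object shadows first_object, so we
			 *	translate the first_object offset into the
			 *	copy object's offset space.)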
59945748Smckusick 			 */
60045748Smckusick 			copy_offset = first_offset
60145748Smckusick 				- copy_object->shadow_offset;
60245748Smckusick 			copy_m = vm_page_lookup(copy_object, copy_offset);
60345748Smckusick 			if (page_exists = (copy_m != VM_PAGE_NULL)) {
60445748Smckusick 				if (copy_m->busy) {
60545748Smckusick #ifdef DOTHREADS
60645748Smckusick 					int	wait_result;
60745748Smckusick 
60845748Smckusick 					/*
60945748Smckusick 					 *	If the page is being brought
61045748Smckusick 					 *	in, wait for it and then retry.
61145748Smckusick 					 */
61245748Smckusick 					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
61345748Smckusick 					RELEASE_PAGE(m);
61445748Smckusick 					copy_object->ref_count--;
61545748Smckusick 					vm_object_unlock(copy_object);
61645748Smckusick 					UNLOCK_THINGS;
61745748Smckusick 					thread_block();
61845748Smckusick 					wait_result = current_thread()->wait_result;
61945748Smckusick 					vm_object_deallocate(first_object);
62045748Smckusick 					if (wait_result != THREAD_AWAKENED)
62145748Smckusick 						return(KERN_SUCCESS);
62245748Smckusick 					goto RetryFault;
62345748Smckusick #else
62445748Smckusick 					/*
62545748Smckusick 					 *	If the page is being brought
62645748Smckusick 					 *	in, wait for it and then retry.
62745748Smckusick 					 */
62845748Smckusick 					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
62945748Smckusick 					RELEASE_PAGE(m);
63045748Smckusick 					copy_object->ref_count--;
63145748Smckusick 					vm_object_unlock(copy_object);
63245748Smckusick 					UNLOCK_THINGS;
63345748Smckusick 					thread_block();
63445748Smckusick 					vm_object_deallocate(first_object);
63545748Smckusick 					goto RetryFault;
63645748Smckusick #endif
63745748Smckusick 				}
63845748Smckusick 			}
63945748Smckusick 
64045748Smckusick 			/*
64145748Smckusick 			 *	If the page is not in memory (in the object)
64245748Smckusick 			 *	and the object has a pager, we have to check
64345748Smckusick 			 *	if the pager has the data in secondary
64445748Smckusick 			 *	storage.
64545748Smckusick 			 */
64645748Smckusick 			if (!page_exists) {
64745748Smckusick 
64845748Smckusick 				/*
64945748Smckusick 				 *	If we don't allocate a (blank) page
65045748Smckusick 				 *	here... another thread could try
65145748Smckusick 				 *	to page it in, allocate a page, and
65245748Smckusick 				 *	then block on the busy page in its
65345748Smckusick 				 *	shadow (first_object).  Then we'd
65445748Smckusick 				 *	trip over the busy page after we
65545748Smckusick 				 *	found that the copy_object's pager
65645748Smckusick 				 *	doesn't have the page...
65745748Smckusick 				 */
65845748Smckusick 				copy_m = vm_page_alloc(copy_object,
65945748Smckusick 								copy_offset);
66045748Smckusick 				if (copy_m == VM_PAGE_NULL) {
66145748Smckusick 					/*
66245748Smckusick 					 *	Wait for a page, then retry.
66345748Smckusick 					 */
66445748Smckusick 					RELEASE_PAGE(m);
66545748Smckusick 					copy_object->ref_count--;
66645748Smckusick 					vm_object_unlock(copy_object);
66745748Smckusick 					UNLOCK_AND_DEALLOCATE;
66845748Smckusick 					VM_WAIT;
66945748Smckusick 					goto RetryFault;
67045748Smckusick 				}
67145748Smckusick 
67245748Smckusick 			 	if (copy_object->pager != vm_pager_null) {
67345748Smckusick 					vm_object_unlock(object);
67445748Smckusick 					vm_object_unlock(copy_object);
67545748Smckusick 					UNLOCK_MAP;
67645748Smckusick 
67745748Smckusick 					page_exists = vm_pager_has_page(
67845748Smckusick 							copy_object->pager,
67945748Smckusick 							(copy_offset + copy_object->paging_offset));
68045748Smckusick 
68145748Smckusick 					vm_object_lock(copy_object);
68245748Smckusick 
68345748Smckusick 					/*
68445748Smckusick 					 * Since the map is unlocked, someone
68545748Smckusick 					 * else could have copied this object
68645748Smckusick 					 * and put a different copy_object
68745748Smckusick 					 * between the two.  Or, the last
68845748Smckusick 					 * reference to the copy-object (other
68945748Smckusick 					 * than the one we have) may have
69045748Smckusick 					 * disappeared - if that has happened,
69145748Smckusick 					 * we don't need to make the copy.
69245748Smckusick 					 */
69345748Smckusick 					if (copy_object->shadow != object ||
69445748Smckusick 					    copy_object->ref_count == 1) {
69545748Smckusick 						/*
69645748Smckusick 						 *	Gaah... start over!
69745748Smckusick 						 */
69845748Smckusick 						FREE_PAGE(copy_m);
69945748Smckusick 						vm_object_unlock(copy_object);
70045748Smckusick 						vm_object_deallocate(copy_object);
70145748Smckusick 							/* may block */
70245748Smckusick 						vm_object_lock(object);
70345748Smckusick 						goto RetryCopy;
70445748Smckusick 					}
70545748Smckusick 					vm_object_lock(object);
70645748Smckusick 
70745748Smckusick 					if (page_exists) {
70845748Smckusick 						/*
70945748Smckusick 						 *	We didn't need the page
71045748Smckusick 						 */
71145748Smckusick 						FREE_PAGE(copy_m);
71245748Smckusick 					}
71345748Smckusick 				}
71445748Smckusick 			}
71545748Smckusick 			if (!page_exists) {
71645748Smckusick 				/*
71745748Smckusick 				 *	Must copy page into copy-object.
71845748Smckusick 				 */
71945748Smckusick 				vm_page_copy(m, copy_m);
72045748Smckusick 				copy_m->fake = FALSE;
72145748Smckusick 				copy_m->absent = FALSE;
72245748Smckusick 
72345748Smckusick 				/*
72445748Smckusick 				 * Things to remember:
72545748Smckusick 				 * 1. The copied page must be marked 'dirty'
72645748Smckusick 				 *    so it will be paged out to the copy
72745748Smckusick 				 *    object.
72845748Smckusick 				 * 2. If the old page was in use by any users
72945748Smckusick 				 *    of the copy-object, it must be removed
73045748Smckusick 				 *    from all pmaps.  (We can't know which
73145748Smckusick 				 *    pmaps use it.)
73245748Smckusick 				 */
73345748Smckusick 				vm_page_lock_queues();
73445748Smckusick 				pmap_remove_all(VM_PAGE_TO_PHYS(old_m));
73545748Smckusick 				copy_m->clean = FALSE;
73645748Smckusick 				vm_page_activate(copy_m);	/* XXX */
73745748Smckusick 				vm_page_unlock_queues();
73845748Smckusick 
73945748Smckusick 				PAGE_WAKEUP(copy_m);
74045748Smckusick 			}
74145748Smckusick 			/*
74245748Smckusick 			 *	The reference count on copy_object must be
74345748Smckusick 			 *	at least 2: one for our extra reference,
74445748Smckusick 			 *	and at least one from the outside world
74545748Smckusick 			 *	(we checked that when we last locked
74645748Smckusick 			 *	copy_object).
74745748Smckusick 			 */
74845748Smckusick 			copy_object->ref_count--;
74945748Smckusick 			vm_object_unlock(copy_object);
75045748Smckusick 			m->copy_on_write = FALSE;
75145748Smckusick 		}
75245748Smckusick 	}
75345748Smckusick 
75445748Smckusick 	if (m->active || m->inactive)
75545748Smckusick 		panic("vm_fault: active or inactive before retrying lookup");
75645748Smckusick 
75745748Smckusick 	/*
75845748Smckusick 	 *	We must verify that the maps have not changed
75945748Smckusick 	 *	since our last lookup.
76045748Smckusick 	 */
76145748Smckusick 
76245748Smckusick 	if (!lookup_still_valid) {
76345748Smckusick 		vm_object_t	retry_object;
76445748Smckusick 		vm_offset_t	retry_offset;
76545748Smckusick 		vm_prot_t	retry_prot;
76645748Smckusick 
76745748Smckusick 		/*
76845748Smckusick 		 *	Since map entries may be pageable, make sure we can
76945748Smckusick 		 *	take a page fault on them.
77045748Smckusick 		 */
77145748Smckusick 		vm_object_unlock(object);
77245748Smckusick 
77345748Smckusick 		/*
77445748Smckusick 		 *	To avoid trying to write_lock the map while another
77545748Smckusick 		 *	thread has it read_locked (in vm_map_pageable), we
77645748Smckusick 		 *	do not try for write permission.  If the page is
77745748Smckusick 		 *	still writable, we will get write permission.  If it
77845748Smckusick 		 *	is not, or has been marked needs_copy, we enter the
77945748Smckusick 		 *	mapping without write permission, and will merely
78045748Smckusick 		 *	take another fault.
78145748Smckusick 		 */
78245748Smckusick 		result = vm_map_lookup(&map, vaddr,
78345748Smckusick 				fault_type & ~VM_PROT_WRITE, &entry,
78445748Smckusick 				&retry_object, &retry_offset, &retry_prot,
78545748Smckusick 				&wired, &su);
78645748Smckusick 
78745748Smckusick 		vm_object_lock(object);
78845748Smckusick 
78945748Smckusick 		/*
79045748Smckusick 		 *	If we don't need the page any longer, put it on the
79145748Smckusick 		 *	active list (the easiest thing to do here).  If no
79245748Smckusick 		 *	one needs it, pageout will grab it eventually.
79345748Smckusick 		 */
79445748Smckusick 
79545748Smckusick 		if (result != KERN_SUCCESS) {
79645748Smckusick 			RELEASE_PAGE(m);
79745748Smckusick 			UNLOCK_AND_DEALLOCATE;
79845748Smckusick 			return(result);
79945748Smckusick 		}
80045748Smckusick 
80145748Smckusick 		lookup_still_valid = TRUE;
80245748Smckusick 
80345748Smckusick 		if ((retry_object != first_object) ||
80445748Smckusick 				(retry_offset != first_offset)) {
80545748Smckusick 			RELEASE_PAGE(m);
80645748Smckusick 			UNLOCK_AND_DEALLOCATE;
80745748Smckusick 			goto RetryFault;
80845748Smckusick 		}
80945748Smckusick 
81045748Smckusick 		/*
81145748Smckusick 		 *	Check whether the protection has changed or the object
81245748Smckusick 		 *	has been copied while we left the map unlocked.
81345748Smckusick 		 *	Changing from read to write permission is OK - we leave
81445748Smckusick 		 *	the page write-protected, and catch the write fault.
81545748Smckusick 		 *	Changing from write to read permission means that we
81645748Smckusick 		 *	can't mark the page write-enabled after all.
81745748Smckusick 		 */
81845748Smckusick 		prot &= retry_prot;
81945748Smckusick 		if (m->copy_on_write)
82045748Smckusick 			prot &= ~VM_PROT_WRITE;
82145748Smckusick 	}
82245748Smckusick 
82345748Smckusick 	/*
82445748Smckusick 	 * (the various bits we're fiddling with here are locked by
82545748Smckusick 	 * the object's lock)
82645748Smckusick 	 */
82745748Smckusick 
82845748Smckusick 	/* XXX This distorts the meaning of the copy_on_write bit */
82945748Smckusick 
83045748Smckusick 	if (prot & VM_PROT_WRITE)
83145748Smckusick 		m->copy_on_write = FALSE;
83245748Smckusick 
83345748Smckusick 	/*
83445748Smckusick 	 *	It's critically important that a wired-down page be faulted
83545748Smckusick 	 *	only once in each map for which it is wired.
83645748Smckusick 	 */
83745748Smckusick 
83845748Smckusick 	if (m->active || m->inactive)
83945748Smckusick 		panic("vm_fault: active or inactive before pmap_enter");
84045748Smckusick 
84145748Smckusick 	vm_object_unlock(object);
84245748Smckusick 
84345748Smckusick 	/*
84445748Smckusick 	 *	Put this page into the physical map.
84545748Smckusick 	 *	We had to do the unlock above because pmap_enter
84645748Smckusick 	 *	may cause other faults.   We don't put the
84745748Smckusick 	 *	page back on the active queue until later so
84845748Smckusick 	 *	that the page-out daemon won't find us (yet).
84945748Smckusick 	 */
85045748Smckusick 
85145748Smckusick 	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
85245748Smckusick 			prot & ~(m->page_lock), wired);
85345748Smckusick 
85445748Smckusick 	/*
85545748Smckusick 	 *	If the page is not wired down, then put it where the
85645748Smckusick 	 *	pageout daemon can find it.
85745748Smckusick 	 */
85845748Smckusick 	vm_object_lock(object);
85945748Smckusick 	vm_page_lock_queues();
86045748Smckusick 	if (change_wiring) {
86145748Smckusick 		if (wired)
86245748Smckusick 			vm_page_wire(m);
86345748Smckusick 		else
86445748Smckusick 			vm_page_unwire(m);
86545748Smckusick 	}
86645748Smckusick 	else
86745748Smckusick 		vm_page_activate(m);
86845748Smckusick 	vm_page_unlock_queues();
86945748Smckusick 
87045748Smckusick 	/*
87145748Smckusick 	 *	Unlock everything, and return
87245748Smckusick 	 */
87345748Smckusick 
87445748Smckusick 	PAGE_WAKEUP(m);
87545748Smckusick 	UNLOCK_AND_DEALLOCATE;
87645748Smckusick 
87745748Smckusick 	return(KERN_SUCCESS);
87845748Smckusick 
87945748Smckusick }
88045748Smckusick 
88145748Smckusick /*
88245748Smckusick  *	vm_fault_wire:
88345748Smckusick  *
88445748Smckusick  *	Wire down a range of virtual addresses in a map.
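 *	Typically called from vm_map_pageable() when a region is being
 *	wired down.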
88545748Smckusick  */
88645748Smckusick void vm_fault_wire(map, start, end)
88745748Smckusick 	vm_map_t	map;
88845748Smckusick 	vm_offset_t	start, end;
88945748Smckusick {
89045748Smckusick 
89145748Smckusick 	register vm_offset_t	va;
89245748Smckusick 	register pmap_t		pmap;
89345748Smckusick 
89445748Smckusick 	pmap = vm_map_pmap(map);
89545748Smckusick 
89645748Smckusick 	/*
89745748Smckusick 	 *	Inform the physical mapping system that the
89845748Smckusick 	 *	range of addresses may not fault, so that
89945748Smckusick 	 *	page tables and such can be locked down as well.
90045748Smckusick 	 */
90145748Smckusick 
90245748Smckusick 	pmap_pageable(pmap, start, end, FALSE);
90345748Smckusick 
90445748Smckusick 	/*
90545748Smckusick 	 *	We simulate a fault to get the page and enter it
90645748Smckusick 	 *	in the physical map.
90745748Smckusick 	 */
90845748Smckusick 
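	/*
	 *	change_wiring is TRUE and the map entry is already marked
	 *	wired, so vm_fault will wire each page that it brings in;
	 *	VM_PROT_NONE requests no particular access.
	 */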
90945748Smckusick 	for (va = start; va < end; va += PAGE_SIZE) {
91045748Smckusick 		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
91145748Smckusick 	}
91245748Smckusick }
91345748Smckusick 
91445748Smckusick 
91545748Smckusick /*
91645748Smckusick  *	vm_fault_unwire:
91745748Smckusick  *
91845748Smckusick  *	Unwire a range of virtual addresses in a map.
91945748Smckusick  */
92045748Smckusick void vm_fault_unwire(map, start, end)
92145748Smckusick 	vm_map_t	map;
92245748Smckusick 	vm_offset_t	start, end;
92345748Smckusick {
92445748Smckusick 
92545748Smckusick 	register vm_offset_t	va, pa;
92645748Smckusick 	register pmap_t		pmap;
92745748Smckusick 
92845748Smckusick 	pmap = vm_map_pmap(map);
92945748Smckusick 
93045748Smckusick 	/*
93145748Smckusick 	 *	Since the pages are wired down, we must be able to
93245748Smckusick 	 *	get their mappings from the physical map system.
93345748Smckusick 	 */
93445748Smckusick 
93545748Smckusick 	vm_page_lock_queues();
93645748Smckusick 
93745748Smckusick 	for (va = start; va < end; va += PAGE_SIZE) {
93845748Smckusick 		pa = pmap_extract(pmap, va);
93945748Smckusick 		if (pa == (vm_offset_t) 0) {
94045748Smckusick 			panic("unwire: page not in pmap");
94145748Smckusick 		}
94245748Smckusick 		pmap_change_wiring(pmap, va, FALSE);
94345748Smckusick 		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
94445748Smckusick 	}
94545748Smckusick 	vm_page_unlock_queues();
94645748Smckusick 
94745748Smckusick 	/*
94845748Smckusick 	 *	Inform the physical mapping system that the range
94945748Smckusick 	 *	of addresses may fault, so that page tables and
95045748Smckusick 	 *	such may be unwired themselves.
95145748Smckusick 	 */
95245748Smckusick 
95345748Smckusick 	pmap_pageable(pmap, start, end, TRUE);
95445748Smckusick 
95545748Smckusick }
95645748Smckusick 
95745748Smckusick /*
95845748Smckusick  *	Routine:
95945748Smckusick  *		vm_fault_copy_entry
96045748Smckusick  *	Function:
96145748Smckusick  *		Copy all of the pages from a wired-down map entry to another.
96245748Smckusick  *
96345748Smckusick  *	In/out conditions:
96445748Smckusick  *		The source and destination maps must be locked for write.
96545748Smckusick  *		The source map entry must be wired down (or be a sharing map
96645748Smckusick  *		entry corresponding to a main map entry that is wired down).
96745748Smckusick  */
96845748Smckusick 
96945748Smckusick void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
97045748Smckusick 	vm_map_t	dst_map;
97145748Smckusick 	vm_map_t	src_map;
97245748Smckusick 	vm_map_entry_t	dst_entry;
97345748Smckusick 	vm_map_entry_t	src_entry;
97445748Smckusick {
97545748Smckusick 
97645748Smckusick 	vm_object_t	dst_object;
97745748Smckusick 	vm_object_t	src_object;
97845748Smckusick 	vm_offset_t	dst_offset;
97945748Smckusick 	vm_offset_t	src_offset;
98045748Smckusick 	vm_prot_t	prot;
98145748Smckusick 	vm_offset_t	vaddr;
98245748Smckusick 	vm_page_t	dst_m;
98345748Smckusick 	vm_page_t	src_m;
98445748Smckusick 
98545748Smckusick #ifdef	lint
98645748Smckusick 	src_map++;
98745748Smckusick #endif	lint
98845748Smckusick 
98945748Smckusick 	src_object = src_entry->object.vm_object;
99045748Smckusick 	src_offset = src_entry->offset;
99145748Smckusick 
99245748Smckusick 	/*
99345748Smckusick 	 *	Create the top-level object for the destination entry.
99445748Smckusick 	 *	(Doesn't actually shadow anything - we copy the pages
99545748Smckusick 	 *	directly.)
99645748Smckusick 	 */
99745748Smckusick 	dst_object = vm_object_allocate(
99845748Smckusick 			(vm_size_t) (dst_entry->end - dst_entry->start));
99945748Smckusick 
100045748Smckusick 	dst_entry->object.vm_object = dst_object;
100145748Smckusick 	dst_entry->offset = 0;
100245748Smckusick 
100345748Smckusick 	prot  = dst_entry->max_protection;
100445748Smckusick 
100545748Smckusick 	/*
100645748Smckusick 	 *	Loop through all of the pages in the entry's range, copying
100745748Smckusick 	 *	each one from the source object (it should be there) to the
100845748Smckusick 	 *	destination object.
100945748Smckusick 	 */
101045748Smckusick 	for (vaddr = dst_entry->start, dst_offset = 0;
101145748Smckusick 	     vaddr < dst_entry->end;
101245748Smckusick 	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
101345748Smckusick 
101445748Smckusick 		/*
101545748Smckusick 		 *	Allocate a page in the destination object
101645748Smckusick 		 */
101745748Smckusick 		vm_object_lock(dst_object);
101845748Smckusick 		do {
101945748Smckusick 			dst_m = vm_page_alloc(dst_object, dst_offset);
102045748Smckusick 			if (dst_m == VM_PAGE_NULL) {
102145748Smckusick 				vm_object_unlock(dst_object);
102245748Smckusick 				VM_WAIT;
102345748Smckusick 				vm_object_lock(dst_object);
102445748Smckusick 			}
102545748Smckusick 		} while (dst_m == VM_PAGE_NULL);
102645748Smckusick 
102745748Smckusick 		/*
102845748Smckusick 		 *	Find the page in the source object, and copy it in.
102945748Smckusick 		 *	(Because the source is wired down, the page will be
103045748Smckusick 		 *	in memory.)
103145748Smckusick 		 */
103245748Smckusick 		vm_object_lock(src_object);
103345748Smckusick 		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
103445748Smckusick 		if (src_m == VM_PAGE_NULL)
103545748Smckusick 			panic("vm_fault_copy_entry: page missing");
103645748Smckusick 
103745748Smckusick 		vm_page_copy(src_m, dst_m);
103845748Smckusick 
103945748Smckusick 		/*
104045748Smckusick 		 *	Enter it in the pmap...
104145748Smckusick 		 */
104245748Smckusick 		vm_object_unlock(src_object);
104345748Smckusick 		vm_object_unlock(dst_object);
104445748Smckusick 
104545748Smckusick 		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
104645748Smckusick 				prot, FALSE);
104745748Smckusick 
104845748Smckusick 		/*
104945748Smckusick 		 *	Mark it no longer busy, and put it on the active list.
105045748Smckusick 		 */
105145748Smckusick 		vm_object_lock(dst_object);
105245748Smckusick 		vm_page_lock_queues();
105345748Smckusick 		vm_page_activate(dst_m);
105445748Smckusick 		vm_page_unlock_queues();
105545748Smckusick 		PAGE_WAKEUP(dst_m);
105645748Smckusick 		vm_object_unlock(dst_object);
105745748Smckusick 	}
105845748Smckusick 
105945748Smckusick }
1060