xref: /csrg-svn/sys/vm/vm_fault.c (revision 65231)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_fault.c	8.3 (Berkeley) 12/30/93
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	Page fault handling module.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 
46 #include <vm/vm.h>
47 #include <vm/vm_page.h>
48 #include <vm/vm_pageout.h>
49 
50 /*
51  *	vm_fault:
52  *
53  *	Handle a page fault occurring at the given address,
54  *	requiring the given permissions, in the map specified.
55  *	If successful, the page is inserted into the
56  *	associated physical map.
57  *
58  *	NOTE: the given address should be truncated to the
59  *	proper page address.
60  *
61  *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
62  *	a standard error specifying why the fault is fatal is returned.
63  *
64  *
65  *	The map in question must be referenced, and remains so.
66  *	Caller may hold no locks.
67  */
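/*
 * Illustrative sketch: the usual caller is a machine-dependent trap
 * handler.  The names `va' and `ftype' below are placeholders for the
 * faulting address and the access type derived from the trap frame.
 *
 *	rv = vm_fault(map, trunc_page(va), ftype, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		(post a signal to the process, or panic in kernel mode)
 */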
68 int
69 vm_fault(map, vaddr, fault_type, change_wiring)
70 	vm_map_t	map;
71 	vm_offset_t	vaddr;
72 	vm_prot_t	fault_type;
73 	boolean_t	change_wiring;
74 {
75 	vm_object_t		first_object;
76 	vm_offset_t		first_offset;
77 	vm_map_entry_t		entry;
78 	register vm_object_t	object;
79 	register vm_offset_t	offset;
80 	register vm_page_t	m;
81 	vm_page_t		first_m;
82 	vm_prot_t		prot;
83 	int			result;
84 	boolean_t		wired;
85 	boolean_t		su;
86 	boolean_t		lookup_still_valid;
87 	boolean_t		page_exists;
88 	vm_page_t		old_m;
89 	vm_object_t		next_object;
90 
91 	cnt.v_vm_faults++;		/* needs lock XXX */
92 /*
93  *	Recovery actions
94  */
95 #define	FREE_PAGE(m)	{				\
96 	PAGE_WAKEUP(m);					\
97 	vm_page_lock_queues();				\
98 	vm_page_free(m);				\
99 	vm_page_unlock_queues();			\
100 }
101 
102 #define	RELEASE_PAGE(m)	{				\
103 	PAGE_WAKEUP(m);					\
104 	vm_page_lock_queues();				\
105 	vm_page_activate(m);				\
106 	vm_page_unlock_queues();			\
107 }
108 
109 #define	UNLOCK_MAP	{				\
110 	if (lookup_still_valid) {			\
111 		vm_map_lookup_done(map, entry);		\
112 		lookup_still_valid = FALSE;		\
113 	}						\
114 }
115 
116 #define	UNLOCK_THINGS	{				\
117 	object->paging_in_progress--;			\
118 	vm_object_unlock(object);			\
119 	if (object != first_object) {			\
120 		vm_object_lock(first_object);		\
121 		FREE_PAGE(first_m);			\
122 		first_object->paging_in_progress--;	\
123 		vm_object_unlock(first_object);		\
124 	}						\
125 	UNLOCK_MAP;					\
126 }
127 
128 #define	UNLOCK_AND_DEALLOCATE	{			\
129 	UNLOCK_THINGS;					\
130 	vm_object_deallocate(first_object);		\
131 }
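/*
 * How the recovery macros above relate: FREE_PAGE discards a page we no
 * longer want, while RELEASE_PAGE puts one back on the active queue.
 * UNLOCK_MAP drops the map lookup if we still hold it; UNLOCK_THINGS
 * additionally releases the current object (and the placeholder page in
 * first_object, if we have moved past it); UNLOCK_AND_DEALLOCATE further
 * drops the extra reference taken on first_object.
 */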
132 
133     RetryFault: ;
134 
135 	/*
136 	 *	Find the backing store object and offset into
137 	 *	it to begin the search.
138 	 */
139 
140 	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
141 			&first_object, &first_offset,
142 			&prot, &wired, &su)) != KERN_SUCCESS) {
143 		return(result);
144 	}
145 	lookup_still_valid = TRUE;
146 
147 	if (wired)
148 		fault_type = prot;
149 
150 	first_m = NULL;
151 
152    	/*
153 	 *	Make a reference to this object to
154 	 *	prevent its disposal while we are messing with
155 	 *	it.  Once we have the reference, the map is free
156 	 *	to be diddled.  Since objects reference their
157 	 *	shadows (and copies), they will stay around as well.
158 	 */
159 
160 	vm_object_lock(first_object);
161 
162 	first_object->ref_count++;
163 	first_object->paging_in_progress++;
164 
165 	/*
166 	 *	INVARIANTS (through entire routine):
167 	 *
168 	 *	1)	At all times, we must either have the object
169 	 *		lock or a busy page in some object to prevent
170 	 *		some other thread from trying to bring in
171 	 *		the same page.
172 	 *
173 	 *		Note that we cannot hold any locks during the
174 	 *		pager access or when waiting for memory, so
175 	 *		we use a busy page then.
176 	 *
177 	 *		Note also that we aren't as concerned about
178  *		more than one thread attempting to pager_data_unlock
179 	 *		the same page at once, so we don't hold the page
180 	 *		as busy then, but do record the highest unlock
181 	 *		value so far.  [Unlock requests may also be delivered
182 	 *		out of order.]
183 	 *
184 	 *	2)	Once we have a busy page, we must remove it from
185 	 *		the pageout queues, so that the pageout daemon
186 	 *		will not grab it away.
187 	 *
188 	 *	3)	To prevent another thread from racing us down the
189 	 *		shadow chain and entering a new page in the top
190 	 *		object before we do, we must keep a busy page in
191 	 *		the top object while following the shadow chain.
192 	 *
193 	 *	4)	We must increment paging_in_progress on any object
194 	 *		for which we have a busy page, to prevent
195 	 *		vm_object_collapse from removing the busy page
196 	 *		without our noticing.
197 	 */
198 
199 	/*
200 	 *	Search for the page at object/offset.
201 	 */
202 
203 	object = first_object;
204 	offset = first_offset;
205 
206 	/*
207 	 *	See whether this page is resident
208 	 */
209 
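	/*
	 * Outline of the loop below: starting at first_object, look for a
	 * resident page; if it is busy, sleep and retry the whole fault.
	 * Otherwise allocate a placeholder page, ask the pager for the
	 * data, and on VM_PAGER_FAIL move down to the next shadow object,
	 * zero-filling a page in the top object once the chain runs out.
	 */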
210 	while (TRUE) {
211 		m = vm_page_lookup(object, offset);
212 		if (m != NULL) {
213 			/*
214 			 *	If the page is being brought in,
215 			 *	wait for it and then retry.
216 			 */
217 			if (m->flags & PG_BUSY) {
218 #ifdef DOTHREADS
219 				int	wait_result;
220 
221 				PAGE_ASSERT_WAIT(m, !change_wiring);
222 				UNLOCK_THINGS;
223 				thread_block();
224 				wait_result = current_thread()->wait_result;
225 				vm_object_deallocate(first_object);
226 				if (wait_result != THREAD_AWAKENED)
227 					return(KERN_SUCCESS);
228 				goto RetryFault;
229 #else
230 				PAGE_ASSERT_WAIT(m, !change_wiring);
231 				UNLOCK_THINGS;
232 				thread_block();
233 				vm_object_deallocate(first_object);
234 				goto RetryFault;
235 #endif
236 			}
237 
238 			/*
239 			 *	Remove the page from the pageout daemon's
240 			 *	reach while we play with it.
241 			 */
242 
243 			vm_page_lock_queues();
244 			if (m->flags & PG_INACTIVE) {
245 				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
246 				m->flags &= ~PG_INACTIVE;
247 				cnt.v_inactive_count--;
248 				cnt.v_reactivated++;
249 			}
250 
251 			if (m->flags & PG_ACTIVE) {
252 				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
253 				m->flags &= ~PG_ACTIVE;
254 				cnt.v_active_count--;
255 			}
256 			vm_page_unlock_queues();
257 
258 			/*
259 			 *	Mark page busy for other threads.
260 			 */
261 			m->flags |= PG_BUSY;
262 			break;
263 		}
264 
265 		if (((object->pager != NULL) &&
266 				(!change_wiring || wired))
267 		    || (object == first_object)) {
268 
269 			/*
270 			 *	Allocate a new page for this object/offset
271 			 *	pair.
272 			 */
273 
274 			m = vm_page_alloc(object, offset);
275 
276 			if (m == NULL) {
277 				UNLOCK_AND_DEALLOCATE;
278 				VM_WAIT;
279 				goto RetryFault;
280 			}
281 		}
282 
283 		if (object->pager != NULL && (!change_wiring || wired)) {
284 			int rv;
285 
286 			/*
287 			 *	Now that we have a busy page, we can
288 			 *	release the object lock.
289 			 */
290 			vm_object_unlock(object);
291 
292 			/*
293 			 *	Call the pager to retrieve the data, if any,
294 			 *	after releasing the lock on the map.
295 			 */
296 			UNLOCK_MAP;
297 			rv = vm_pager_get(object->pager, m, TRUE);
298 
299 			/*
300 			 *	Reacquire the object lock to preserve our
301 			 *	invariant.
302 			 */
303 			vm_object_lock(object);
304 
305 			/*
306 			 *	Found the page.
307 			 *	Leave it busy while we play with it.
308 			 */
309 			if (rv == VM_PAGER_OK) {
310 				/*
311 				 *	Relookup in case pager changed page.
312 				 *	Pager is responsible for disposition
313 				 *	of old page if moved.
314 				 */
315 				m = vm_page_lookup(object, offset);
316 
317 				cnt.v_pageins++;
318 				m->flags &= ~PG_FAKE;
319 				m->flags |= PG_CLEAN;
320 				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
321 				break;
322 			}
323 
324 			/*
325 			 * I/O error or page outside the range of the pager:
326 			 * clean up and return an error.
327 			 */
328 			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
329 				FREE_PAGE(m);
330 				UNLOCK_AND_DEALLOCATE;
331 				return(KERN_PROTECTION_FAILURE); /* XXX */
332 			}
333 			/*
334 			 * rv == VM_PAGER_FAIL:
335 			 *
336 			 * Page does not exist at this object/offset.
337 			 * Free the bogus page (waking up anyone waiting
338 			 * for it) and continue on to the next object.
339 			 *
340 			 * If this is the top-level object, we must
341 			 * leave the busy page to prevent another
342 			 * thread from rushing past us, and inserting
343 			 * the page in that object at the same time
344 			 * that we are.
345 			 */
346 			if (object != first_object) {
347 				FREE_PAGE(m);
348 				/* note that `m' is not used after this */
349 			}
350 		}
351 
352 		/*
353 		 * We get here if the object has no pager (or we are just
354 		 * unwiring) or the pager does not have the page.
355 		 */
356 		if (object == first_object)
357 			first_m = m;
358 
359 		/*
360 		 *	Move on to the next object.  Lock the next
361 		 *	object before unlocking the current one.
362 		 */
363 
364 		offset += object->shadow_offset;
365 		next_object = object->shadow;
366 		if (next_object == NULL) {
367 			/*
368 			 *	If there's no object left, fill the page
369 			 *	in the top object with zeros.
370 			 */
371 			if (object != first_object) {
372 				object->paging_in_progress--;
373 				vm_object_unlock(object);
374 
375 				object = first_object;
376 				offset = first_offset;
377 				m = first_m;
378 				vm_object_lock(object);
379 			}
380 			first_m = NULL;
381 
382 			vm_page_zero_fill(m);
383 			cnt.v_zfod++;
384 			m->flags &= ~PG_FAKE;
385 			break;
386 		}
387 		else {
388 			vm_object_lock(next_object);
389 			if (object != first_object)
390 				object->paging_in_progress--;
391 			vm_object_unlock(object);
392 			object = next_object;
393 			object->paging_in_progress++;
394 		}
395 	}
396 
397 	if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
398 		panic("vm_fault: active, inactive or !busy after main loop");
399 
400 	/*
401 	 *	PAGE HAS BEEN FOUND.
402 	 *	[Loop invariant still holds -- the object lock
403 	 *	is held.]
404 	 */
405 
406 	old_m = m;	/* save page that would be copied */
407 
408 	/*
409 	 *	If the page is being written, but isn't
410 	 *	already owned by the top-level object,
411 	 *	we have to copy it into a new page owned
412 	 *	by the top-level object.
413 	 */
414 
415 	if (object != first_object) {
416 	    	/*
417 		 *	We only really need to copy if we
418 		 *	want to write it.
419 		 */
420 
421 	    	if (fault_type & VM_PROT_WRITE) {
422 
423 			/*
424 			 *	If we try to collapse first_object at this
425 			 *	point, we may deadlock when we try to get
426 			 *	the lock on an intermediate object (since we
427 			 *	have the bottom object locked).  We can't
428 			 *	unlock the bottom object, because the page
429 			 *	we found may move (by collapse) if we do.
430 			 *
431 			 *	Instead, we first copy the page.  Then, when
432 			 *	we have no more use for the bottom object,
433 			 *	we unlock it and try to collapse.
434 			 *
435 			 *	Note that we copy the page even if we didn't
436 			 *	need to... that's the breaks.
437 			 */
438 
439 		    	/*
440 			 *	We already have an empty page in
441 			 *	first_object - use it.
442 			 */
443 
444 			vm_page_copy(m, first_m);
445 			first_m->flags &= ~PG_FAKE;
446 
447 			/*
448 			 *	If another map is truly sharing this
449 			 *	page with us, we have to flush all
450 			 *	uses of the original page, since we
451 			 *	can't distinguish those which want the
452 			 *	original from those which need the
453 			 *	new copy.
454 			 *
455 			 *	XXX If we know that only one map has
456 			 *	access to this page, then we could
457 			 *	avoid the pmap_page_protect() call.
458 			 */
459 
460 			vm_page_lock_queues();
461 			vm_page_activate(m);
462 			vm_page_deactivate(m);
463 			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
464 			vm_page_unlock_queues();
465 
466 			/*
467 			 *	We no longer need the old page or object.
468 			 */
469 			PAGE_WAKEUP(m);
470 			object->paging_in_progress--;
471 			vm_object_unlock(object);
472 
473 			/*
474 			 *	Only use the new page below...
475 			 */
476 
477 			cnt.v_cow_faults++;
478 			m = first_m;
479 			object = first_object;
480 			offset = first_offset;
481 
482 			/*
483 			 *	Now that we've gotten the copy out of the
484 			 *	way, let's try to collapse the top object.
485 			 */
486 			vm_object_lock(object);
487 			/*
488 			 *	But we have to play ugly games with
489 			 *	paging_in_progress to do that...
490 			 */
491 			object->paging_in_progress--;
492 			vm_object_collapse(object);
493 			object->paging_in_progress++;
494 		}
495 		else {
496 		    	prot &= (~VM_PROT_WRITE);
497 			m->flags |= PG_COPYONWRITE;
498 		}
499 	}
500 
501 	if (m->flags & (PG_ACTIVE|PG_INACTIVE))
502 		panic("vm_fault: active or inactive before copy object handling");
503 
504 	/*
505 	 *	If the page is being written, but hasn't been
506 	 *	copied to the copy-object, we have to copy it there.
507 	 */
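	/*
	 * Background: first_object->copy, when set, is a snapshot object,
	 * typically created by vm_object_copy() when another map copied
	 * this object.  A write here must first push the old page into
	 * that snapshot so the copy keeps seeing the pre-write contents.
	 */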
508     RetryCopy:
509 	if (first_object->copy != NULL) {
510 		vm_object_t copy_object = first_object->copy;
511 		vm_offset_t copy_offset;
512 		vm_page_t copy_m;
513 
514 		/*
515 		 *	We only need to copy if we want to write it.
516 		 */
517 		if ((fault_type & VM_PROT_WRITE) == 0) {
518 			prot &= ~VM_PROT_WRITE;
519 			m->flags |= PG_COPYONWRITE;
520 		}
521 		else {
522 			/*
523 			 *	Try to get the lock on the copy_object.
524 			 */
525 			if (!vm_object_lock_try(copy_object)) {
526 				vm_object_unlock(object);
527 				/* should spin a bit here... */
528 				vm_object_lock(object);
529 				goto RetryCopy;
530 			}
531 
532 			/*
533 			 *	Make another reference to the copy-object,
534 			 *	to keep it from disappearing during the
535 			 *	copy.
536 			 */
537 			copy_object->ref_count++;
538 
539 			/*
540 			 *	Does the page exist in the copy?
541 			 */
542 			copy_offset = first_offset
543 				- copy_object->shadow_offset;
544 			copy_m = vm_page_lookup(copy_object, copy_offset);
545 			if (page_exists = (copy_m != NULL)) {
546 				if (copy_m->flags & PG_BUSY) {
547 #ifdef DOTHREADS
548 					int	wait_result;
549 
550 					/*
551 					 *	If the page is being brought
552 					 *	in, wait for it and then retry.
553 					 */
554 					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
555 					RELEASE_PAGE(m);
556 					copy_object->ref_count--;
557 					vm_object_unlock(copy_object);
558 					UNLOCK_THINGS;
559 					thread_block();
560 					wait_result = current_thread()->wait_result;
561 					vm_object_deallocate(first_object);
562 					if (wait_result != THREAD_AWAKENED)
563 						return(KERN_SUCCESS);
564 					goto RetryFault;
565 #else
566 					/*
567 					 *	If the page is being brought
568 					 *	in, wait for it and then retry.
569 					 */
570 					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
571 					RELEASE_PAGE(m);
572 					copy_object->ref_count--;
573 					vm_object_unlock(copy_object);
574 					UNLOCK_THINGS;
575 					thread_block();
576 					vm_object_deallocate(first_object);
577 					goto RetryFault;
578 #endif
579 				}
580 			}
581 
582 			/*
583 			 *	If the page is not in memory (in the object)
584 			 *	and the object has a pager, we have to check
585 			 *	if the pager has the data in secondary
586 			 *	storage.
587 			 */
588 			if (!page_exists) {
589 
590 				/*
591 				 *	If we don't allocate a (blank) page
592 				 *	here... another thread could try
593 				 *	to page it in, allocate a page, and
594 				 *	then block on the busy page in its
595 				 *	shadow (first_object).  Then we'd
596 				 *	trip over the busy page after we
597 				 *	found that the copy_object's pager
598 				 *	doesn't have the page...
599 				 */
600 				copy_m = vm_page_alloc(copy_object,
601 								copy_offset);
602 				if (copy_m == NULL) {
603 					/*
604 					 *	Wait for a page, then retry.
605 					 */
606 					RELEASE_PAGE(m);
607 					copy_object->ref_count--;
608 					vm_object_unlock(copy_object);
609 					UNLOCK_AND_DEALLOCATE;
610 					VM_WAIT;
611 					goto RetryFault;
612 				}
613 
614 			 	if (copy_object->pager != NULL) {
615 					vm_object_unlock(object);
616 					vm_object_unlock(copy_object);
617 					UNLOCK_MAP;
618 
619 					page_exists = vm_pager_has_page(
620 							copy_object->pager,
621 							(copy_offset + copy_object->paging_offset));
622 
623 					vm_object_lock(copy_object);
624 
625 					/*
626 					 * Since the map is unlocked, someone
627 					 * else could have copied this object
628 					 * and put a different copy_object
629 					 * between the two.  Or, the last
630 					 * reference to the copy-object (other
631 					 * than the one we have) may have
632 					 * disappeared - if that has happened,
633 					 * we don't need to make the copy.
634 					 */
635 					if (copy_object->shadow != object ||
636 					    copy_object->ref_count == 1) {
637 						/*
638 						 *	Gaah... start over!
639 						 */
640 						FREE_PAGE(copy_m);
641 						vm_object_unlock(copy_object);
642 						vm_object_deallocate(copy_object);
643 							/* may block */
644 						vm_object_lock(object);
645 						goto RetryCopy;
646 					}
647 					vm_object_lock(object);
648 
649 					if (page_exists) {
650 						/*
651 						 *	We didn't need the page
652 						 */
653 						FREE_PAGE(copy_m);
654 					}
655 				}
656 			}
657 			if (!page_exists) {
658 				/*
659 				 *	Must copy page into copy-object.
660 				 */
661 				vm_page_copy(m, copy_m);
662 				copy_m->flags &= ~PG_FAKE;
663 
664 				/*
665 				 * Things to remember:
666 				 * 1. The copied page must be marked 'dirty'
667 				 *    so it will be paged out to the copy
668 				 *    object.
669 				 * 2. If the old page was in use by any users
670 				 *    of the copy-object, it must be removed
671 				 *    from all pmaps.  (We can't know which
672 				 *    pmaps use it.)
673 				 */
674 				vm_page_lock_queues();
675 				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
676 						  VM_PROT_NONE);
677 				copy_m->flags &= ~PG_CLEAN;
678 				vm_page_activate(copy_m);	/* XXX */
679 				vm_page_unlock_queues();
680 
681 				PAGE_WAKEUP(copy_m);
682 			}
683 			/*
684 			 *	The reference count on copy_object must be
685 			 *	at least 2: one for our extra reference,
686 			 *	and at least one from the outside world
687 			 *	(we checked that when we last locked
688 			 *	copy_object).
689 			 */
690 			copy_object->ref_count--;
691 			vm_object_unlock(copy_object);
692 			m->flags &= ~PG_COPYONWRITE;
693 		}
694 	}
695 
696 	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
697 		panic("vm_fault: active or inactive before retrying lookup");
698 
699 	/*
700 	 *	We must verify that the maps have not changed
701 	 *	since our last lookup.
702 	 */
703 
704 	if (!lookup_still_valid) {
705 		vm_object_t	retry_object;
706 		vm_offset_t	retry_offset;
707 		vm_prot_t	retry_prot;
708 
709 		/*
710 		 *	Since map entries may be pageable, make sure we can
711 		 *	take a page fault on them.
712 		 */
713 		vm_object_unlock(object);
714 
715 		/*
716 		 *	To avoid trying to write_lock the map while another
717 		 *	thread has it read_locked (in vm_map_pageable), we
718 		 *	do not try for write permission.  If the page is
719 		 *	still writable, we will get write permission.  If it
720 		 *	is not, or has been marked needs_copy, we enter the
721 		 *	mapping without write permission, and will merely
722 		 *	take another fault.
723 		 */
724 		result = vm_map_lookup(&map, vaddr,
725 				fault_type & ~VM_PROT_WRITE, &entry,
726 				&retry_object, &retry_offset, &retry_prot,
727 				&wired, &su);
728 
729 		vm_object_lock(object);
730 
731 		/*
732 		 *	If we don't need the page any longer, put it on the
733 		 *	active list (the easiest thing to do here).  If no
734 		 *	one needs it, pageout will grab it eventually.
735 		 */
736 
737 		if (result != KERN_SUCCESS) {
738 			RELEASE_PAGE(m);
739 			UNLOCK_AND_DEALLOCATE;
740 			return(result);
741 		}
742 
743 		lookup_still_valid = TRUE;
744 
745 		if ((retry_object != first_object) ||
746 				(retry_offset != first_offset)) {
747 			RELEASE_PAGE(m);
748 			UNLOCK_AND_DEALLOCATE;
749 			goto RetryFault;
750 		}
751 
752 		/*
753 		 *	Check whether the protection has changed or the object
754 		 *	has been copied while we left the map unlocked.
755 		 *	Changing from read to write permission is OK - we leave
756 		 *	the page write-protected, and catch the write fault.
757 		 *	Changing from write to read permission means that we
758 		 *	can't mark the page write-enabled after all.
759 		 */
760 		prot &= retry_prot;
761 		if (m->flags & PG_COPYONWRITE)
762 			prot &= ~VM_PROT_WRITE;
763 	}
764 
765 	/*
766 	 * (the various bits we're fiddling with here are locked by
767 	 * the object's lock)
768 	 */
769 
770 	/* XXX This distorts the meaning of the copy_on_write bit */
771 
772 	if (prot & VM_PROT_WRITE)
773 		m->flags &= ~PG_COPYONWRITE;
774 
775 	/*
776 	 *	It's critically important that a wired-down page be faulted
777 	 *	only once in each map for which it is wired.
778 	 */
779 
780 	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
781 		panic("vm_fault: active or inactive before pmap_enter");
782 
783 	vm_object_unlock(object);
784 
785 	/*
786 	 *	Put this page into the physical map.
787 	 *	We had to do the unlock above because pmap_enter
788 	 *	may cause other faults.   We don't put the
789 	 *	page back on the active queue until later so
790 	 *	that the page-out daemon won't find us (yet).
791 	 */
792 
793 	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
794 
795 	/*
796 	 *	If the page is not wired down, then put it where the
797 	 *	pageout daemon can find it.
798 	 */
799 	vm_object_lock(object);
800 	vm_page_lock_queues();
801 	if (change_wiring) {
802 		if (wired)
803 			vm_page_wire(m);
804 		else
805 			vm_page_unwire(m);
806 	}
807 	else
808 		vm_page_activate(m);
809 	vm_page_unlock_queues();
810 
811 	/*
812 	 *	Unlock everything, and return
813 	 */
814 
815 	PAGE_WAKEUP(m);
816 	UNLOCK_AND_DEALLOCATE;
817 
818 	return(KERN_SUCCESS);
819 
820 }
821 
822 /*
823  *	vm_fault_wire:
824  *
825  *	Wire down a range of virtual addresses in a map.
826  */
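/*
 * Illustrative sketch: the vm_map layer wires an entry roughly as follows
 * (placeholder code, not taken from this file):
 *
 *	rv = vm_fault_wire(map, entry->start, entry->end);
 *	if (rv != KERN_SUCCESS)
 *		(unwire whatever was wired so far and fail the request)
 */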
827 int
828 vm_fault_wire(map, start, end)
829 	vm_map_t	map;
830 	vm_offset_t	start, end;
831 {
832 	register vm_offset_t	va;
833 	register pmap_t		pmap;
834 	int			rv;
835 
836 	pmap = vm_map_pmap(map);
837 
838 	/*
839 	 *	Inform the physical mapping system that the
840 	 *	range of addresses may not fault, so that
841 	 *	page tables and such can be locked down as well.
842 	 */
843 
844 	pmap_pageable(pmap, start, end, FALSE);
845 
846 	/*
847 	 *	We simulate a fault to get the page and enter it
848 	 *	in the physical map.
849 	 */
850 
851 	for (va = start; va < end; va += PAGE_SIZE) {
852 		rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
853 		if (rv) {
854 			if (va != start)
855 				vm_fault_unwire(map, start, va);
856 			return(rv);
857 		}
858 	}
859 	return(KERN_SUCCESS);
860 }
861 
862 
863 /*
864  *	vm_fault_unwire:
865  *
866  *	Unwire a range of virtual addresses in a map.
867  */
868 void vm_fault_unwire(map, start, end)
869 	vm_map_t	map;
870 	vm_offset_t	start, end;
871 {
872 
873 	register vm_offset_t	va, pa;
874 	register pmap_t		pmap;
875 
876 	pmap = vm_map_pmap(map);
877 
878 	/*
879 	 *	Since the pages are wired down, we must be able to
880 	 *	get their mappings from the physical map system.
881 	 */
882 
883 	vm_page_lock_queues();
884 
885 	for (va = start; va < end; va += PAGE_SIZE) {
886 		pa = pmap_extract(pmap, va);
887 		if (pa == (vm_offset_t) 0) {
888 			panic("unwire: page not in pmap");
889 		}
890 		pmap_change_wiring(pmap, va, FALSE);
891 		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
892 	}
893 	vm_page_unlock_queues();
894 
895 	/*
896 	 *	Inform the physical mapping system that the range
897 	 *	of addresses may fault, so that page tables and
898 	 *	such may be unwired themselves.
899 	 */
900 
901 	pmap_pageable(pmap, start, end, TRUE);
902 
903 }
904 
905 /*
906  *	Routine:
907  *		vm_fault_copy_entry
908  *	Function:
909  *		Copy all of the pages from a wired-down map entry to another.
910  *
911  *	In/out conditions:
912  *		The source and destination maps must be locked for write.
913  *		The source map entry must be wired down (or be a sharing map
914  *		entry corresponding to a main map entry that is wired down).
915  */
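/*
 * Usage note: copy-on-write cannot be applied to wired-down pages, so
 * copying such an entry (e.g. at fork time) must duplicate every page
 * eagerly; that is what the loop below does.
 */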
916 
917 void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
918 	vm_map_t	dst_map;
919 	vm_map_t	src_map;
920 	vm_map_entry_t	dst_entry;
921 	vm_map_entry_t	src_entry;
922 {
923 
924 	vm_object_t	dst_object;
925 	vm_object_t	src_object;
926 	vm_offset_t	dst_offset;
927 	vm_offset_t	src_offset;
928 	vm_prot_t	prot;
929 	vm_offset_t	vaddr;
930 	vm_page_t	dst_m;
931 	vm_page_t	src_m;
932 
933 #ifdef	lint
934 	src_map++;
935 #endif
936 
937 	src_object = src_entry->object.vm_object;
938 	src_offset = src_entry->offset;
939 
940 	/*
941 	 *	Create the top-level object for the destination entry.
942 	 *	(Doesn't actually shadow anything - we copy the pages
943 	 *	directly.)
944 	 */
945 	dst_object = vm_object_allocate(
946 			(vm_size_t) (dst_entry->end - dst_entry->start));
947 
948 	dst_entry->object.vm_object = dst_object;
949 	dst_entry->offset = 0;
950 
951 	prot  = dst_entry->max_protection;
952 
953 	/*
954 	 *	Loop through all of the pages in the entry's range, copying
955 	 *	each one from the source object (it should be there) to the
956 	 *	destination object.
957 	 */
958 	for (vaddr = dst_entry->start, dst_offset = 0;
959 	     vaddr < dst_entry->end;
960 	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
961 
962 		/*
963 		 *	Allocate a page in the destination object
964 		 */
965 		vm_object_lock(dst_object);
966 		do {
967 			dst_m = vm_page_alloc(dst_object, dst_offset);
968 			if (dst_m == NULL) {
969 				vm_object_unlock(dst_object);
970 				VM_WAIT;
971 				vm_object_lock(dst_object);
972 			}
973 		} while (dst_m == NULL);
974 
975 		/*
976 		 *	Find the page in the source object, and copy it in.
977 		 *	(Because the source is wired down, the page will be
978 		 *	in memory.)
979 		 */
980 		vm_object_lock(src_object);
981 		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
982 		if (src_m == NULL)
983 			panic("vm_fault_copy_wired: page missing");
984 
985 		vm_page_copy(src_m, dst_m);
986 
987 		/*
988 		 *	Enter it in the pmap...
989 		 */
990 		vm_object_unlock(src_object);
991 		vm_object_unlock(dst_object);
992 
993 		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
994 				prot, FALSE);
995 
996 		/*
997 		 *	Mark it no longer busy, and put it on the active list.
998 		 */
999 		vm_object_lock(dst_object);
1000 		vm_page_lock_queues();
1001 		vm_page_activate(dst_m);
1002 		vm_page_unlock_queues();
1003 		PAGE_WAKEUP(dst_m);
1004 		vm_object_unlock(dst_object);
1005 	}
1006 
1007 }
1008