1 /*	$OpenBSD: uvm_fault.c,v 1.162 2025/01/22 10:52:09 mpi Exp $	*/
2 /*	$NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
29  */
30 
31 /*
32  * uvm_fault.c: fault handler
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/percpu.h>
39 #include <sys/proc.h>
40 #include <sys/malloc.h>
41 #include <sys/mman.h>
42 #include <sys/tracepoint.h>
43 
44 #include <uvm/uvm.h>
45 
46 /*
47  *
48  * a word on page faults:
49  *
50  * types of page faults we handle:
51  *
52  * CASE 1: upper layer faults                   CASE 2: lower layer faults
53  *
54  *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
55  *    read/write1     write>1                  read/write   +-cow_write/zero
56  *         |             |                         |        |
57  *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
58  * amap |  V  |       |  ---------> new |          |        | |  ^  |
59  *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
60  *                                                 |        |    |
61  *      +-----+       +-----+                   +--|--+     | +--|--+
62  * uobj | d/c |       | d/c |                   |  V  |     +----+  |
63  *      +-----+       +-----+                   +-----+       +-----+
64  *
65  * d/c = don't care
66  *
67  *   case [0]: layerless fault
68  *	no amap or uobj is present.   this is an error.
69  *
70  *   case [1]: upper layer fault [anon active]
71  *     1A: [read] or [write with anon->an_ref == 1]
72  *		I/O takes place in upper level anon and uobj is not touched.
73  *     1B: [write with anon->an_ref > 1]
74  *		new anon is alloc'd and data is copied off ["COW"]
75  *
76  *   case [2]: lower layer fault [uobj]
77  *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
78  *		I/O takes place directly in object.
79  *     2B: [write to copy_on_write] or [read on NULL uobj]
80  *		data is "promoted" from uobj to a new anon.
81  *		if uobj is null, then we zero fill.
82  *
83  * we follow the standard UVM locking protocol ordering:
84  *
85  * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
86  * we hold a PG_BUSY page if we unlock for I/O
87  *
88  *
89  * the code is structured as follows:
90  *
91  *     - init the "IN" params in the ufi structure
92  *   ReFault: (ERESTART returned to the loop in uvm_fault)
93  *     - do lookups [locks maps], check protection, handle needs_copy
94  *     - check for case 0 fault (error)
95  *     - establish "range" of fault
96  *     - if we have an amap lock it and extract the anons
97  *     - if sequential advice deactivate pages behind us
98  *     - at the same time check pmap for unmapped areas and anon for pages
99  *	 that we could map in (and map them if found)
100  *     - check object for resident pages that we could map in
101  *     - if (case 2) goto Case2
102  *     - >>> handle case 1
103  *           - ensure source anon is resident in RAM
104  *           - if case 1B alloc new anon and copy from source
105  *           - map the correct page in
106  *   Case2:
107  *     - >>> handle case 2
108  *           - ensure source page is resident (if uobj)
109  *           - if case 2B alloc new anon and copy from source (could be zero
110  *		fill if uobj == NULL)
111  *           - map the correct page in
112  *     - done!
113  *
114  * note on paging:
115  *   if we have to do I/O we place a PG_BUSY page in the correct object,
116  * unlock everything, and do the I/O.   when I/O is done we must reverify
117  * the state of the world before assuming that our data structures are
118  * valid.   [because mappings could change while the map is unlocked]
119  *
120  *  alternative 1: unbusy the page in question and restart the page fault
121  *    from the top (ReFault).   this is easy but does not take advantage
122  *    of the information that we already have from our previous lookup,
123  *    although it is possible that the "hints" in the vm_map will help here.
124  *
125  * alternative 2: the system already keeps track of a "version" number of
126  *    a map.   [i.e. every time you write-lock a map (e.g. to change a
127  *    mapping) you bump the version number up by one...]   so, we can save
128  *    the version number of the map before we release the lock and start I/O.
129  *    then when I/O is done we can relock and check the version numbers
130  *    to see if anything changed.    this might gain us something over
131  *    alternative 1: no need to unbusy the page, and maybe fewer compares(?).
132  *
133  * alternative 3: put in backpointers or a way to "hold" part of a map
134  *    in place while I/O is in progress.   this could be complex to
135  *    implement (especially with structures like amap that can be referenced
136  *    by multiple map entries, and figuring out what should wait could be
137  *    complex as well...).
138  *
139  * we use alternative 2.  given that we are multi-threaded now we may want
140  * to reconsider the choice.
141  */
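/*
 * a minimal sketch of the alternative 2 scheme as implemented below
 * ("mapv" and "timestamp" are the names actually used by uvmfault_lookup()
 * and uvmfault_relock() at the end of this file):
 *
 *	ufi->mapv = ufi->map->timestamp;	[saved while the map is locked]
 *	... unlock everything and do the I/O ...
 *	vm_map_lock_read(ufi->map);
 *	if (ufi->mapv != ufi->map->timestamp)
 *		give up and ReFault		[somebody changed the map]
 */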
142 
143 /*
144  * local data structures
145  */
146 struct uvm_advice {
147 	int nback;
148 	int nforw;
149 };
150 
151 /*
152  * page range array: set up in uvmfault_init().
153  */
154 static struct uvm_advice uvmadvice[MADV_MASK + 1];
155 
156 #define UVM_MAXRANGE 16	/* must be max() of nback+nforw+1 */
157 
158 /*
159  * private prototypes
160  */
161 static void uvmfault_amapcopy(struct uvm_faultinfo *);
162 static inline void uvmfault_anonflush(struct vm_anon **, int);
163 void	uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
164 void	uvmfault_update_stats(struct uvm_faultinfo *);
165 
166 /*
167  * inline functions
168  */
169 /*
170  * uvmfault_anonflush: try and deactivate pages in specified anons
171  *
172  * => does not have to deactivate page if it is busy
173  */
174 static inline void
175 uvmfault_anonflush(struct vm_anon **anons, int n)
176 {
177 	int lcv;
178 	struct vm_page *pg;
179 
180 	for (lcv = 0; lcv < n; lcv++) {
181 		if (anons[lcv] == NULL)
182 			continue;
183 		KASSERT(rw_lock_held(anons[lcv]->an_lock));
184 		pg = anons[lcv]->an_page;
185 		if (pg && (pg->pg_flags & PG_BUSY) == 0) {
186 			uvm_lock_pageq();
187 			if (pg->wire_count == 0) {
188 				uvm_pagedeactivate(pg);
189 			}
190 			uvm_unlock_pageq();
191 		}
192 	}
193 }
194 
195 /*
196  * normal functions
197  */
198 /*
199  * uvmfault_init: compute proper values for the uvmadvice[] array.
200  */
201 void
202 uvmfault_init(void)
203 {
204 	int npages;
205 
206 	npages = atop(16384);
207 	if (npages > 0) {
208 		KASSERT(npages <= UVM_MAXRANGE / 2);
209 		uvmadvice[MADV_NORMAL].nforw = npages;
210 		uvmadvice[MADV_NORMAL].nback = npages - 1;
211 	}
212 
213 	npages = atop(32768);
214 	if (npages > 0) {
215 		KASSERT(npages <= UVM_MAXRANGE / 2);
216 		uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1;
217 		uvmadvice[MADV_SEQUENTIAL].nback = npages;
218 	}
219 }
220 
221 /*
222  * uvmfault_amapcopy: clear "needs_copy" in a map.
223  *
224  * => called with VM data structures unlocked (usually, see below)
225  * => we get a write lock on the maps and clear needs_copy for a VA
226  * => if we are out of RAM we sleep (waiting for more)
227  */
228 static void
229 uvmfault_amapcopy(struct uvm_faultinfo *ufi)
230 {
231 	for (;;) {
232 		/*
233 		 * no mapping?  give up.
234 		 */
235 		if (uvmfault_lookup(ufi, TRUE) == FALSE)
236 			return;
237 
238 		/*
239 		 * copy if needed.
240 		 */
241 		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
242 			amap_copy(ufi->map, ufi->entry, M_NOWAIT,
243 				UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
244 				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
245 
246 		/*
247 		 * didn't work?  must be out of RAM.   unlock and sleep.
248 		 */
249 		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
250 			uvmfault_unlockmaps(ufi, TRUE);
251 			uvm_wait("fltamapcopy");
252 			continue;
253 		}
254 
255 		/*
256 		 * got it!   unlock and return.
257 		 */
258 		uvmfault_unlockmaps(ufi, TRUE);
259 		return;
260 	}
261 	/*NOTREACHED*/
262 }
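/*
 * typical use (see the needs_copy handling in uvm_fault_check() below):
 * on a write fault against a needs-copy entry the fault code drops its
 * locks, calls uvmfault_amapcopy(ufi), and returns ERESTART so that the
 * whole fault is retried against the now-private amap.
 */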
263 
264 /*
265  * uvmfault_anonget: get data in an anon into a non-busy, non-released
266  * page in that anon.
267  *
268  * => Map, amap and thus anon should be locked by caller.
269  * => If we fail, we unlock everything and error is returned.
270  * => If we are successful, return with everything still locked.
271  * => We do not move the page on the queues [gets moved later].  If we
272  *    allocate a new page [we_own], it gets put on the queues.  Either way,
273  *    the result is that the page is on the queues at return time
274  */
275 int
276 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
277     struct vm_anon *anon)
278 {
279 	struct vm_page *pg;
280 	int error;
281 
282 	KASSERT(rw_lock_held(anon->an_lock));
283 	KASSERT(anon->an_lock == amap->am_lock);
284 
285 	/* Increment the counters. */
286 	counters_inc(uvmexp_counters, flt_anget);
287 	if (anon->an_page) {
288 		curproc->p_ru.ru_minflt++;
289 	} else {
290 		curproc->p_ru.ru_majflt++;
291 	}
292 	error = 0;
293 
294 	/*
295 	 * Loop until we get the anon data, or fail.
296 	 */
297 	for (;;) {
298 		boolean_t we_own, locked;
299 		/*
300 		 * Note: 'we_own' will become true if we set PG_BUSY on a page.
301 		 */
302 		we_own = FALSE;
303 		pg = anon->an_page;
304 
305 		/*
306 		 * Is page resident?  Make sure it is not busy/released.
307 		 */
308 		if (pg) {
309 			KASSERT(pg->pg_flags & PQ_ANON);
310 			KASSERT(pg->uanon == anon);
311 
312 			/*
313 			 * if the page is busy, we drop all the locks and
314 			 * try again.
315 			 */
316 			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0)
317 				return 0;
318 			counters_inc(uvmexp_counters, flt_pgwait);
319 
320 			/*
321 			 * The last unlock must be an atomic unlock and wait
322 			 * on the owner of page.
323 			 */
324 			KASSERT(pg->uobject == NULL);
325 			uvmfault_unlockall(ufi, NULL, NULL);
326 			uvm_pagewait(pg, anon->an_lock, "anonget");
327 		} else {
328 			/*
329 			 * No page, therefore allocate one.
330 			 */
331 			pg = uvm_pagealloc(NULL, 0, anon, 0);
332 			if (pg == NULL) {
333 				/* Out of memory.  Wait a little. */
334 				uvmfault_unlockall(ufi, amap, NULL);
335 				counters_inc(uvmexp_counters, flt_noram);
336 				uvm_wait("flt_noram1");
337 			} else {
338 				/* PG_BUSY bit is set. */
339 				we_own = TRUE;
340 				uvmfault_unlockall(ufi, amap, NULL);
341 
342 				/*
343 				 * Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into
344 				 * the uvm_swap_get() function with all data
345 				 * structures unlocked.  Note that it is OK
346 				 * to read an_swslot here, because we hold
347 				 * PG_BUSY on the page.
348 				 */
349 				counters_inc(uvmexp_counters, pageins);
350 				error = uvm_swap_get(pg, anon->an_swslot,
351 				    PGO_SYNCIO);
352 
353 				/*
354 				 * We clean up after the I/O below in the
355 				 * 'we_own' case.
356 				 */
357 			}
358 		}
359 
360 		/*
361 		 * Re-lock the map and anon.
362 		 */
363 		locked = uvmfault_relock(ufi);
364 		if (locked || we_own) {
365 			rw_enter(anon->an_lock, RW_WRITE);
366 		}
367 
368 		/*
369 		 * If we own the page (i.e. we set PG_BUSY), then we need
370 		 * to clean up after the I/O.  There are three cases to
371 		 * consider:
372 		 *
373 		 * 1) Page was released during I/O: free anon and ReFault.
374 		 * 2) I/O not OK.  Free the page and cause the fault to fail.
375 		 * 3) I/O OK!  Activate the page and sync with the non-we_own
376 		 *    case (i.e. drop anon lock if not locked).
377 		 */
378 		if (we_own) {
379 			if (pg->pg_flags & PG_WANTED) {
380 				wakeup(pg);
381 			}
382 
383 			/*
384 			 * if we were RELEASED during I/O, then our anon is
385 			 * no longer part of an amap.   we need to free the
386 			 * anon and try again.
387 			 */
388 			if (pg->pg_flags & PG_RELEASED) {
389 				KASSERT(anon->an_ref == 0);
390 				/*
391 				 * Released while we had unlocked amap.
392 				 */
393 				if (locked)
394 					uvmfault_unlockall(ufi, NULL, NULL);
395 				uvm_anon_release(anon);	/* frees page for us */
396 				counters_inc(uvmexp_counters, flt_pgrele);
397 				return ERESTART;	/* refault! */
398 			}
399 
400 			if (error != VM_PAGER_OK) {
401 				KASSERT(error != VM_PAGER_PEND);
402 
403 				/* remove page from anon */
404 				anon->an_page = NULL;
405 
406 				/*
407 				 * Remove the swap slot from the anon and
408 				 * mark the anon as having no real slot.
409 				 * Do not free the swap slot, thus preventing
410 				 * it from being used again.
411 				 */
412 				uvm_swap_markbad(anon->an_swslot, 1);
413 				anon->an_swslot = SWSLOT_BAD;
414 
415 				/*
416 				 * Note: page was never !PG_BUSY, so it
417 				 * cannot be mapped and thus no need to
418 				 * pmap_page_protect() it.
419 				 */
420 				uvm_lock_pageq();
421 				uvm_pagefree(pg);
422 				uvm_unlock_pageq();
423 
424 				if (locked) {
425 					uvmfault_unlockall(ufi, NULL, NULL);
426 				}
427 				rw_exit(anon->an_lock);
428 				/*
429 				 * An error occurred while trying to bring
430 				 * in the page -- this is the only error we
431 				 * return right now.
432 				 */
433 				return EACCES;	/* XXX */
434 			}
435 
436 			/*
437 			 * We have successfully read the page, activate it.
438 			 */
439 			pmap_clear_modify(pg);
440 			uvm_lock_pageq();
441 			uvm_pageactivate(pg);
442 			uvm_unlock_pageq();
443 			atomic_clearbits_int(&pg->pg_flags,
444 			    PG_WANTED|PG_BUSY|PG_FAKE);
445 			UVM_PAGE_OWN(pg, NULL);
446 		}
447 
448 		/*
449 		 * We were not able to re-lock the map - restart the fault.
450 		 */
451 		if (!locked) {
452 			if (we_own) {
453 				rw_exit(anon->an_lock);
454 			}
455 			return ERESTART;
456 		}
457 
458 		/*
459 		 * Verify that no one has touched the amap and moved
460 		 * the anon on us.
461 		 */
462 		if (ufi != NULL && amap_lookup(&ufi->entry->aref,
463 		    ufi->orig_rvaddr - ufi->entry->start) != anon) {
464 			uvmfault_unlockall(ufi, amap, NULL);
465 			return ERESTART;
466 		}
467 
468 		/*
469 		 * Retry..
470 		 */
471 		counters_inc(uvmexp_counters, flt_anretry);
472 		continue;
473 
474 	}
475 	/*NOTREACHED*/
476 }
477 
478 /*
479  * uvmfault_promote: promote data to a new anon.  used for 1B and 2B.
480  *
481  *	1. allocate an anon and a page.
482  *	2. fill its contents.
483  *
484  * => if we fail (result != 0) we unlock everything.
485  * => on success, return a new locked anon via 'nanon'.
486  * => it's the caller's responsibility to put the promoted nanon->an_page
487  *    onto the page queues.
488  */
489 int
490 uvmfault_promote(struct uvm_faultinfo *ufi,
491     struct vm_page *uobjpage,
492     struct vm_anon **nanon, /* OUT: allocated anon */
493     struct vm_page **npg)
494 {
495 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
496 	struct uvm_object *uobj = NULL;
497 	struct vm_anon *anon;
498 	struct vm_page *pg = NULL;
499 
500 	if (uobjpage != PGO_DONTCARE)
501 		uobj = uobjpage->uobject;
502 
503 	KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));
504 
505 	anon = uvm_analloc();
506 	if (anon) {
507 		anon->an_lock = amap->am_lock;
508 		pg = uvm_pagealloc(NULL, 0, anon,
509 		    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
510 	}
511 
512 	/* check for out of RAM */
513 	if (anon == NULL || pg == NULL) {
514 		uvmfault_unlockall(ufi, amap, uobj);
515 		if (anon == NULL)
516 			counters_inc(uvmexp_counters, flt_noanon);
517 		else {
518 			anon->an_lock = NULL;
519 			anon->an_ref--;
520 			uvm_anfree(anon);
521 			counters_inc(uvmexp_counters, flt_noram);
522 		}
523 
524 		if (uvm_swapisfull())
525 			return ENOMEM;
526 
527 		/* out of RAM, wait for more */
528 		if (anon == NULL)
529 			uvm_anwait();
530 		else
531 			uvm_wait("flt_noram3");
532 		return ERESTART;
533 	}
534 
535 	/*
536 	 * copy the page [pg now dirty]
537 	 */
538 	if (uobjpage != PGO_DONTCARE)
539 		uvm_pagecopy(uobjpage, pg);
540 
541 	*nanon = anon;
542 	*npg = pg;
543 	return 0;
544 }
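/*
 * sketch of the caller's side of the contract (both callers live in this
 * file, see uvm_fault_upper() and uvm_fault_lower()):
 *
 *	error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
 *	if (error)
 *		return error;		[everything is already unlocked]
 *	... amap_add() the new anon, clear PG_BUSY on pg, enter the
 *	    mapping, and put pg on the page queues ...
 */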
545 
546 /*
547  * Update statistics after fault resolution.
548  * - maxrss
549  */
550 void
551 uvmfault_update_stats(struct uvm_faultinfo *ufi)
552 {
553 	struct vm_map		*map;
554 	struct proc		*p;
555 	vsize_t			 res;
556 
557 	map = ufi->orig_map;
558 
559 	/*
560 	 * If this is a nested pmap (eg, a virtual machine pmap managed
561 	 * by vmm(4) on amd64/i386), don't do any updating, just return.
562 	 *
563 	 * pmap_nested() on other archs is #defined to 0, so this is a
564 	 * no-op.
565 	 */
566 	if (pmap_nested(map->pmap))
567 		return;
568 
569 	/* Update the maxrss for the process. */
570 	if (map->flags & VM_MAP_ISVMSPACE) {
571 		p = curproc;
572 		KASSERT(p != NULL && &p->p_vmspace->vm_map == map);
573 
574 		res = pmap_resident_count(map->pmap);
575 		/* Convert res from pages to kilobytes. */
576 		res <<= (PAGE_SHIFT - 10);
577 
578 		if (p->p_ru.ru_maxrss < res)
579 			p->p_ru.ru_maxrss = res;
580 	}
581 }
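/*
 * example of the unit conversion above: with 4 KB pages (PAGE_SHIFT == 12)
 * "res <<= (PAGE_SHIFT - 10)" turns a resident page count into kilobytes,
 * e.g. 100 resident pages become a maxrss of 400 KB.
 */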
582 
583 /*
584  *   F A U L T   -   m a i n   e n t r y   p o i n t
585  */
586 
587 /*
588  * uvm_fault: page fault handler
589  *
590  * => called from MD code to resolve a page fault
591  * => VM data structures usually should be unlocked.   however, it is
592  *	possible to call here with the main map locked if the caller
593  *	gets a write lock, sets it recursive, and then calls us (c.f.
594  *	uvm_map_pageable).   this should be avoided because it keeps
595  *	the map locked during I/O.
596  * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
597  */
598 #define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
599 			 ~PROT_WRITE : PROT_MASK)
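/*
 * example: for a copy-on-write entry MASK(entry) is ~PROT_WRITE, so
 * "flt->enter_prot & MASK(ufi->entry)" (used when mapping lower layer
 * pages below) never grants write access to the backing object's pages;
 * for a non-COW entry MASK(entry) is PROT_MASK and the protection passes
 * through unchanged.
 */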
600 struct uvm_faultctx {
601 	/*
602 	 * the following members are set up by uvm_fault_check() and
603 	 * read-only after that.
604 	 */
605 	vm_prot_t enter_prot;
606 	vm_prot_t access_type;
607 	vaddr_t startva;
608 	int npages;
609 	int centeridx;
610 	boolean_t narrow;
611 	boolean_t wired;
612 	paddr_t pa_flags;
613 	boolean_t promote;
614 	int lower_lock_type;
615 };
616 
617 int		uvm_fault_check(
618 		    struct uvm_faultinfo *, struct uvm_faultctx *,
619 		    struct vm_anon ***, vm_fault_t);
620 
621 int		uvm_fault_upper(
622 		    struct uvm_faultinfo *, struct uvm_faultctx *,
623 		    struct vm_anon **);
624 boolean_t	uvm_fault_upper_lookup(
625 		    struct uvm_faultinfo *, const struct uvm_faultctx *,
626 		    struct vm_anon **, struct vm_page **);
627 
628 int		uvm_fault_lower(
629 		    struct uvm_faultinfo *, struct uvm_faultctx *,
630 		    struct vm_page **);
631 int		uvm_fault_lower_io(
632 		    struct uvm_faultinfo *, struct uvm_faultctx *,
633 		    struct uvm_object **, struct vm_page **);
634 
635 int
636 uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
637     vm_prot_t access_type)
638 {
639 	struct uvm_faultinfo ufi;
640 	struct uvm_faultctx flt;
641 	boolean_t shadowed;
642 	struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
643 	struct vm_page *pages[UVM_MAXRANGE];
644 	int error;
645 
646 	counters_inc(uvmexp_counters, faults);
647 	TRACEPOINT(uvm, fault, vaddr, fault_type, access_type, NULL);
648 
649 	/*
650 	 * init the IN parameters in the ufi
651 	 */
652 	ufi.orig_map = orig_map;
653 	ufi.orig_rvaddr = trunc_page(vaddr);
654 	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
655 	flt.access_type = access_type;
656 	flt.narrow = FALSE;		/* assume normal fault for now */
657 	flt.wired = FALSE;		/* assume non-wired fault for now */
658 	flt.lower_lock_type = RW_WRITE;	/* exclusive lock for now */
659 
660 	error = ERESTART;
661 	while (error == ERESTART) { /* ReFault: */
662 		anons = anons_store;
663 
664 		error = uvm_fault_check(&ufi, &flt, &anons, fault_type);
665 		if (error != 0)
666 			continue;
667 
668 		/* True if there is an anon at the faulting address */
669 		shadowed = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
670 		if (shadowed == TRUE) {
671 			/* case 1: fault on an anon in our amap */
672 			error = uvm_fault_upper(&ufi, &flt, anons);
673 		} else {
674 			struct uvm_object *uobj = ufi.entry->object.uvm_obj;
675 
676 			/*
677 			 * if the desired page is not shadowed by the amap and
678 			 * we have a backing object, then we check to see if
679 			 * the backing object would prefer to handle the fault
680 			 * itself (rather than letting us do it with the usual
681 			 * pgo_get hook).  the backing object signals this by
682 			 * providing a pgo_fault routine.
683 			 */
684 			if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
685 				rw_enter(uobj->vmobjlock, RW_WRITE);
686 				KERNEL_LOCK();
687 				error = uobj->pgops->pgo_fault(&ufi,
688 				    flt.startva, pages, flt.npages,
689 				    flt.centeridx, fault_type, flt.access_type,
690 				    PGO_LOCKED);
691 				KERNEL_UNLOCK();
692 			} else {
693 				/* case 2: fault on backing obj or zero fill */
694 				error = uvm_fault_lower(&ufi, &flt, pages);
695 			}
696 		}
697 	}
698 
699 	return error;
700 }
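/*
 * hedged sketch of how machine-dependent trap code typically resolves a
 * user page fault (the real call sites live in each port's trap.c and
 * differ in detail):
 *
 *	error = uvm_fault(&p->p_vmspace->vm_map, fault_addr, 0, access_type);
 *	if (error)
 *		... deliver SIGSEGV/SIGBUS to the faulting process ...
 */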
701 
702 /*
703  * uvm_fault_check: check prot, handle needs-copy, etc.
704  *
705  *	1. lookup entry.
706  *	2. check protection.
707  *	3. adjust fault condition (mainly for simulated fault).
708  *	4. handle needs-copy (lazy amap copy).
709  *	5. establish range of interest for neighbor fault (aka pre-fault).
710  *	6. look up anons (if amap exists).
711  *	7. flush pages (if MADV_SEQUENTIAL)
712  *
713  * => called with nothing locked.
714  * => if we fail (result != 0) we unlock everything.
715  * => initialize/adjust many members of flt.
716  */
717 int
718 uvm_fault_check(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
719     struct vm_anon ***ranons, vm_fault_t fault_type)
720 {
721 	struct vm_amap *amap;
722 	struct uvm_object *uobj;
723 	int nback, nforw;
724 
725 	/*
726 	 * lookup and lock the maps
727 	 */
728 	if (uvmfault_lookup(ufi, FALSE) == FALSE) {
729 		return EFAULT;
730 	}
731 	/* locked: maps(read) */
732 
733 #ifdef DIAGNOSTIC
734 	if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0)
735 		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
736 		    ufi->map, ufi->orig_rvaddr);
737 #endif
738 
739 	/*
740 	 * check protection
741 	 */
742 	if ((ufi->entry->protection & flt->access_type) != flt->access_type) {
743 		uvmfault_unlockmaps(ufi, FALSE);
744 		return EACCES;
745 	}
746 
747 	/*
748 	 * "enter_prot" is the protection we want to enter the page in at.
749 	 * for certain pages (e.g. copy-on-write pages) this protection can
750 	 * be more strict than ufi->entry->protection.  "wired" means either
751 	 * the entry is wired or we are fault-wiring the pg.
752 	 */
753 	flt->enter_prot = ufi->entry->protection;
754 	flt->pa_flags = UVM_ET_ISWC(ufi->entry) ? PMAP_WC : 0;
755 	if (VM_MAPENT_ISWIRED(ufi->entry) || (fault_type == VM_FAULT_WIRE)) {
756 		flt->wired = TRUE;
757 		flt->access_type = flt->enter_prot; /* full access for wired */
758 		/* don't look for neighborhood pages on "wire" fault */
759 		flt->narrow = TRUE;
760 	}
761 
762 	/* handle "needs_copy" case. */
763 	if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
764 		if ((flt->access_type & PROT_WRITE) ||
765 		    (ufi->entry->object.uvm_obj == NULL)) {
766 			/* need to clear */
767 			uvmfault_unlockmaps(ufi, FALSE);
768 			uvmfault_amapcopy(ufi);
769 			counters_inc(uvmexp_counters, flt_amcopy);
770 			return ERESTART;
771 		} else {
772 			/*
773 			 * ensure that we pmap_enter page R/O since
774 			 * needs_copy is still true
775 			 */
776 			flt->enter_prot &= ~PROT_WRITE;
777 		}
778 	}
779 
780 	/*
781 	 * identify the players
782 	 */
783 	amap = ufi->entry->aref.ar_amap;	/* upper layer */
784 	uobj = ufi->entry->object.uvm_obj;	/* lower layer */
785 
786 	/*
787 	 * check for a case 0 fault.  if nothing backing the entry then
788 	 * error now.
789 	 */
790 	if (amap == NULL && uobj == NULL) {
791 		uvmfault_unlockmaps(ufi, FALSE);
792 		return EFAULT;
793 	}
794 
795 	/*
796 	 * for a case 2B fault waste no time on adjacent pages because
797 	 * they are likely already entered.
798 	 */
799 	if (uobj != NULL && amap != NULL &&
800 	    (flt->access_type & PROT_WRITE) != 0) {
801 		/* narrow the fault to just the faulting page */
802 		flt->narrow = TRUE;
803 	}
804 
805 	/*
806 	 * establish range of interest based on advice from mapper
807 	 * and then clip to fit map entry.   note that we only want
808 	 * to do this the first time through the fault.   if we
809 	 * ReFault we will disable this by setting "narrow" to true.
810 	 */
811 	if (flt->narrow == FALSE) {
812 
813 		/* wide fault (!narrow) */
814 		nback = min(uvmadvice[ufi->entry->advice].nback,
815 		    (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
816 		flt->startva = ufi->orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
817 		nforw = min(uvmadvice[ufi->entry->advice].nforw,
818 		    ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1);
819 		/*
820 		 * note: "-1" because we don't want to count the
821 		 * faulting page as forw
822 		 */
823 		flt->npages = nback + nforw + 1;
824 		flt->centeridx = nback;
825 
826 		flt->narrow = TRUE;	/* ensure only once per-fault */
827 	} else {
828 		/* narrow fault! */
829 		nback = nforw = 0;
830 		flt->startva = ufi->orig_rvaddr;
831 		flt->npages = 1;
832 		flt->centeridx = 0;
833 	}
834 
835 	/*
836 	 * if we've got an amap then lock it and extract current anons.
837 	 */
838 	if (amap) {
839 		amap_lock(amap, RW_WRITE);
840 		amap_lookups(&ufi->entry->aref,
841 		    flt->startva - ufi->entry->start, *ranons, flt->npages);
842 	} else {
843 		*ranons = NULL;	/* to be safe */
844 	}
845 
846 	/*
847 	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
848 	 * now and then forget about them (for the rest of the fault).
849 	 */
850 	if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
851 		/* flush back-page anons? */
852 		if (amap)
853 			uvmfault_anonflush(*ranons, nback);
854 
855 		/*
856 		 * flush object?
857 		 */
858 		if (uobj) {
859 			voff_t uoff;
860 
861 			uoff = (flt->startva - ufi->entry->start) + ufi->entry->offset;
862 			rw_enter(uobj->vmobjlock, RW_WRITE);
863 			(void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
864 			    ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE);
865 			rw_exit(uobj->vmobjlock);
866 		}
867 
868 		/* now forget about the backpages */
869 		if (amap)
870 			*ranons += nback;
871 		flt->startva += ((vsize_t)nback << PAGE_SHIFT);
872 		flt->npages -= nback;
873 		flt->centeridx = 0;
874 	}
875 
876 	return 0;
877 }
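/*
 * worked example of the range computation above, assuming 4 KB pages,
 * MADV_NORMAL advice (nback 3 / nforw 4, see uvmfault_init()) and a fault
 * well inside a large entry:
 *
 *	nback = 3, nforw = 4
 *	flt->startva   = ufi->orig_rvaddr - 3 pages
 *	flt->npages    = 3 + 4 + 1 = 8
 *	flt->centeridx = 3
 *
 * a narrow fault (wiring, or a likely case 2B write) instead uses a single
 * page with centeridx 0.
 */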
878 
879 /*
880  * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
881  *
882  * iterate range of interest:
883  *	1. check if h/w mapping exists.  if yes, we don't care
884  *	2. check if anon exists.  if not, page is lower.
885  *	3. if anon exists, enter h/w mapping for neighbors.
886  *
887  * => called with amap locked (if exists).
888  */
889 boolean_t
890 uvm_fault_upper_lookup(struct uvm_faultinfo *ufi,
891     const struct uvm_faultctx *flt, struct vm_anon **anons,
892     struct vm_page **pages)
893 {
894 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
895 	struct vm_anon *anon;
896 	struct vm_page *pg;
897 	boolean_t shadowed;
898 	vaddr_t currva;
899 	paddr_t pa;
900 	int lcv, entered = 0;
901 
902 	/* locked: maps(read), amap(if there) */
903 	KASSERT(amap == NULL ||
904 	    rw_write_held(amap->am_lock));
905 
906 	/*
907 	 * map in the backpages and frontpages we found in the amap in hopes
908 	 * of preventing future faults.    we also init the pages[] array as
909 	 * we go.
910 	 */
911 	currva = flt->startva;
912 	shadowed = FALSE;
913 	for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
914 		/*
915 		 * unmapped or center page.   check if any anon at this level.
916 		 */
917 		if (amap == NULL || anons[lcv] == NULL) {
918 			pages[lcv] = NULL;
919 			continue;
920 		}
921 
922 		/*
923 		 * check for present page and map if possible.
924 		 */
925 		pages[lcv] = PGO_DONTCARE;
926 		if (lcv == flt->centeridx) {	/* save center for later! */
927 			shadowed = TRUE;
928 			continue;
929 		}
930 
931 		anon = anons[lcv];
932 		pg = anon->an_page;
933 
934 		KASSERT(anon->an_lock == amap->am_lock);
935 
936 		/*
937 		 * ignore busy pages.
938 		 * don't play with VAs that are already mapped.
939 		 */
940 		if (pg && (pg->pg_flags & (PG_RELEASED|PG_BUSY)) == 0 &&
941 		    !pmap_extract(ufi->orig_map->pmap, currva, &pa)) {
942 			uvm_lock_pageq();
943 			uvm_pageactivate(pg);	/* reactivate */
944 			uvm_unlock_pageq();
945 			counters_inc(uvmexp_counters, flt_namap);
946 
947 			/* No fault-ahead when wired. */
948 			KASSERT(flt->wired == FALSE);
949 
950 			/*
951 			 * Since this isn't the page that's actually faulting,
952 			 * ignore pmap_enter() failures; it's not critical
953 			 * that we enter these right now.
954 			 */
955 			(void) pmap_enter(ufi->orig_map->pmap, currva,
956 			    VM_PAGE_TO_PHYS(pg) | flt->pa_flags,
957 			    (anon->an_ref > 1) ?
958 			    (flt->enter_prot & ~PROT_WRITE) : flt->enter_prot,
959 			    PMAP_CANFAIL);
960 			entered++;
961 		}
962 	}
963 	if (entered > 0)
964 		pmap_update(ufi->orig_map->pmap);
965 
966 	return shadowed;
967 }
968 
969 /*
970  * uvm_fault_upper: handle upper fault.
971  *
972  *	1. acquire anon lock.
973  *	2. get anon.  let uvmfault_anonget do the dirty work.
974  *	3. if COW, promote data to new anon
975  *	4. enter h/w mapping
976  */
977 int
978 uvm_fault_upper(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
979    struct vm_anon **anons)
980 {
981 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
982 	struct vm_anon *oanon, *anon = anons[flt->centeridx];
983 	struct vm_page *pg = NULL;
984 	int error, ret;
985 
986 	/* locked: maps(read), amap, anon */
987 	KASSERT(rw_write_held(amap->am_lock));
988 	KASSERT(anon->an_lock == amap->am_lock);
989 
990 	/*
991 	 * no matter if we have case 1A or case 1B we are going to need to
992 	 * have the anon's memory resident.   ensure that now.
993 	 */
994 	/*
995 	 * let uvmfault_anonget do the dirty work.
996 	 * if it fails (!OK) it will unlock everything for us.
997 	 * if it succeeds, locks are still valid and locked.
998 	 * also, if it is OK, then the anon's page is on the queues.
999 	 */
1000 	error = uvmfault_anonget(ufi, amap, anon);
1001 	switch (error) {
1002 	case 0:
1003 		break;
1004 
1005 	case ERESTART:
1006 		return ERESTART;
1007 
1008 	default:
1009 		return error;
1010 	}
1011 
1012 	KASSERT(rw_write_held(amap->am_lock));
1013 	KASSERT(anon->an_lock == amap->am_lock);
1014 
1015 	/*
1016 	 * if we are case 1B then we will need to allocate a new blank
1017 	 * anon to transfer the data into.   note that we have a lock
1018 	 * on anon, so no one can busy or release the page until we are done.
1019 	 * also note that the ref count can't drop to zero here because
1020 	 * it is > 1 and we are only dropping one ref.
1021 	 *
1022 	 * in the (hopefully very rare) case that we are out of RAM we
1023 	 * will unlock, wait for more RAM, and refault.
1024 	 *
1025 	 * if we are out of anon VM we wait for RAM to become available.
1026 	 */
1027 
1028 	if ((flt->access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
1029 		/* promoting requires a write lock. */
1030 		KASSERT(rw_write_held(amap->am_lock));
1031 
1032 		counters_inc(uvmexp_counters, flt_acow);
1033 		oanon = anon;		/* oanon = old */
1034 
1035 		error = uvmfault_promote(ufi, oanon->an_page, &anon, &pg);
1036 		if (error)
1037 			return error;
1038 
1039 		/* un-busy! new page */
1040 		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
1041 		UVM_PAGE_OWN(pg, NULL);
1042 		ret = amap_add(&ufi->entry->aref,
1043 		    ufi->orig_rvaddr - ufi->entry->start, anon, 1);
1044 		KASSERT(ret == 0);
1045 
1046 		KASSERT(anon->an_lock == oanon->an_lock);
1047 
1048 		/* deref: can not drop to zero here by defn! */
1049 		KASSERT(oanon->an_ref > 1);
1050 		oanon->an_ref--;
1051 
1052 #if defined(MULTIPROCESSOR) && !defined(__HAVE_PMAP_MPSAFE_ENTER_COW)
1053 		/*
1054 		 * If there are multiple threads, either uvm or the
1055 		 * pmap has to make sure no threads see the old RO
1056 		 * mapping once any have seen the new RW mapping.
1057 		 * uvm does it by inserting the new mapping RO and
1058 		 * letting it fault again.
1059 		 * This is only a problem on MP systems.
1060 		 */
1061 		if (P_HASSIBLING(curproc)) {
1062 			flt->enter_prot &= ~PROT_WRITE;
1063 			flt->access_type &= ~PROT_WRITE;
1064 		}
1065 #endif
1066 
1067 		/*
1068 		 * note: anon is _not_ separately locked, but we have the sole
1069 		 * reference to it from the amap.
1070 		 * thus, no one can get at it until we are done with it.
1071 		 */
1072 	} else {
1073 		counters_inc(uvmexp_counters, flt_anon);
1074 		oanon = anon;
1075 		pg = anon->an_page;
1076 		if (anon->an_ref > 1)     /* disallow writes to ref > 1 anons */
1077 			flt->enter_prot = flt->enter_prot & ~PROT_WRITE;
1078 	}
1079 
1080 	/*
1081 	 * now map the page in.
1082 	 */
1083 	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
1084 	    VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
1085 	    flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) {
1086 		/*
1087 		 * No need to undo what we did; we can simply think of
1088 		 * this as the pmap throwing away the mapping information.
1089 		 *
1090 		 * We do, however, have to go through the ReFault path,
1091 		 * as the map may change while we're asleep.
1092 		 */
1093 		uvmfault_unlockall(ufi, amap, NULL);
1094 		if (uvm_swapisfull()) {
1095 			/* XXX instrumentation */
1096 			return ENOMEM;
1097 		}
1098 #ifdef __HAVE_PMAP_POPULATE
1099 		pmap_populate(ufi->orig_map->pmap, ufi->orig_rvaddr);
1100 #else
1101 		/* XXX instrumentation */
1102 		uvm_wait("flt_pmfail1");
1103 #endif
1104 		return ERESTART;
1105 	}
1106 
1107 	/*
1108 	 * ... update the page queues.
1109 	 */
1110 	uvm_lock_pageq();
1111 	if (flt->wired) {
1112 		uvm_pagewire(pg);
1113 	} else {
1114 		uvm_pageactivate(pg);
1115 	}
1116 	uvm_unlock_pageq();
1117 
1118 	if (flt->wired) {
1119 		/*
1120 		 * since the now-wired page cannot be paged out,
1121 		 * release its swap resources for others to use.
1122 		 * since an anon with no swap cannot be PG_CLEAN,
1123 		 * clear its clean flag now.
1124 		 */
1125 		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1126 		uvm_anon_dropswap(anon);
1127 	}
1128 
1129 	/*
1130 	 * done case 1!  finish up by unlocking everything and returning success
1131 	 */
1132 	uvmfault_unlockall(ufi, amap, NULL);
1133 	pmap_update(ufi->orig_map->pmap);
1134 	return 0;
1135 }
1136 
1137 /*
1138  * uvm_fault_lower_lookup: look up on-memory uobj pages.
1139  *
1140  *	1. get on-memory pages.
1141  *	2. if failed, give up (get only center page later).
1142  *	3. if succeeded, enter h/w mapping of neighbor pages.
1143  */
1144 
1145 struct vm_page *
1146 uvm_fault_lower_lookup(
1147 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1148 	struct vm_page **pages)
1149 {
1150 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1151 	struct vm_page *uobjpage = NULL;
1152 	int lcv, gotpages, entered;
1153 	vaddr_t currva;
1154 	paddr_t pa;
1155 
1156 	rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1157 
1158 	counters_inc(uvmexp_counters, flt_lget);
1159 	gotpages = flt->npages;
1160 	(void) uobj->pgops->pgo_get(uobj,
1161 	    ufi->entry->offset + (flt->startva - ufi->entry->start),
1162 	    pages, &gotpages, flt->centeridx,
1163 	    flt->access_type & MASK(ufi->entry), ufi->entry->advice,
1164 	    PGO_LOCKED);
1165 
1166 	/*
1167 	 * check for pages to map, if we got any
1168 	 */
1169 	if (gotpages == 0) {
1170 		return NULL;
1171 	}
1172 
1173 	entered = 0;
1174 	currva = flt->startva;
1175 	for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
1176 		if (pages[lcv] == NULL ||
1177 		    pages[lcv] == PGO_DONTCARE)
1178 			continue;
1179 
1180 		KASSERT((pages[lcv]->pg_flags & PG_BUSY) == 0);
1181 		KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);
1182 
1183 		/*
1184 		 * if center page is resident and not PG_BUSY, then pgo_get
1185 		 * gave us a handle to it.
1186 		 * remember this page as "uobjpage." (for later use).
1187 		 */
1188 		if (lcv == flt->centeridx) {
1189 			uobjpage = pages[lcv];
1190 			continue;
1191 		}
1192 
1193 		if (pmap_extract(ufi->orig_map->pmap, currva, &pa))
1194 			continue;
1195 
1196 		/*
1197 		 * calling pgo_get with PGO_LOCKED returns us pages which
1198 		 * are neither busy nor released, so we don't need to check
1199 		 * for this.  we can just directly enter the pages.
1200 		 */
1201 		if (pages[lcv]->wire_count == 0) {
1202 			uvm_lock_pageq();
1203 			uvm_pageactivate(pages[lcv]);
1204 			uvm_unlock_pageq();
1205 		}
1206 		counters_inc(uvmexp_counters, flt_nomap);
1207 
1208 		/* No fault-ahead when wired. */
1209 		KASSERT(flt->wired == FALSE);
1210 
1211 		/*
1212 		 * Since this page isn't the page that's actually faulting,
1213 		 * ignore pmap_enter() failures; it's not critical that we
1214 		 * enter these right now.
1215 		 * NOTE: page can't be PG_WANTED or PG_RELEASED because we've
1216 		 * held the lock the whole time we've had the handle.
1217 		 */
1218 		(void) pmap_enter(ufi->orig_map->pmap, currva,
1219 		    VM_PAGE_TO_PHYS(pages[lcv]) | flt->pa_flags,
1220 		    flt->enter_prot & MASK(ufi->entry), PMAP_CANFAIL);
1221 		entered++;
1222 
1223 	}
1224 	if (entered > 0)
1225 		pmap_update(ufi->orig_map->pmap);
1226 
1227 	return uobjpage;
1228 }
1229 
1230 /*
1231  * uvm_fault_lower: handle lower fault.
1232  *
1233  *	1. check uobj
1234  *	1.1. if null, ZFOD.
1235  *	1.2. if not null, look up unmapped neighbor pages.
1236  *	2. for center page, check if promote.
1237  *	2.1. ZFOD always needs promotion.
1238  *	2.2. other uobjs, when entry is marked COW (usually MAP_PRIVATE vnode).
1239  *	3. if uobj is not ZFOD and page is not found, do i/o.
1240  *	4. dispatch either direct / promote fault.
1241  */
1242 int
1243 uvm_fault_lower(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1244    struct vm_page **pages)
1245 {
1246 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
1247 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1248 	int dropswap = 0;
1249 	struct vm_page *uobjpage, *pg = NULL;
1250 	struct vm_anon *anon = NULL;
1251 	int error;
1252 
1253 	/*
1254 	 * now, if the desired page is not shadowed by the amap and we have
1255 	 * a backing object that does not have a special fault routine, then
1256 	 * we ask (with pgo_get) the object for resident pages that we care
1257 	 * about and attempt to map them in.  we do not let pgo_get block
1258 	 * (PGO_LOCKED).
1259 	 */
1260 	if (uobj == NULL) {
1261 		/* zero fill; don't care neighbor pages */
1262 		uobjpage = NULL;
1263 	} else {
1264 		uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
1265 	}
1266 
1267 	/*
1268 	 * note that at this point we are done with any front or back pages.
1269 	 * we are now going to focus on the center page (i.e. the one we've
1270 	 * faulted on).  if we have faulted on the bottom (uobj)
1271 	 * layer [i.e. case 2] and the page was both present and available,
1272 	 * then we've got a pointer to it as "uobjpage" (with PGO_LOCKED the
1273 	 * page is not PG_BUSY, see the KASSERTs in uvm_fault_lower_lookup()).
1274 	 */
1275 
1276 	/*
1277 	 * locked:
1278 	 */
1279 	KASSERT(amap == NULL ||
1280 	    rw_write_held(amap->am_lock));
1281 	KASSERT(uobj == NULL ||
1282 	    rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1283 
1284 	/*
1285 	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
1286 	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
1287 	 * have a backing object, check and see if we are going to promote
1288 	 * the data up to an anon during the fault.
1289 	 */
1290 	if (uobj == NULL) {
1291 		uobjpage = PGO_DONTCARE;
1292 		flt->promote = TRUE;		/* always need anon here */
1293 	} else {
1294 		KASSERT(uobjpage != PGO_DONTCARE);
1295 		flt->promote = (flt->access_type & PROT_WRITE) &&
1296 		     UVM_ET_ISCOPYONWRITE(ufi->entry);
1297 	}
1298 
1299 	/*
1300 	 * if uobjpage is not null then we do not need to do I/O to get the
1301 	 * uobjpage.
1302 	 *
1303 	 * if uobjpage is null, then we need to ask the pager to
1304 	 * get the data for us.   once we have the data, we need to reverify
1305 	 * the state of the world.   we are currently not holding any resources.
1306 	 */
1307 	if (uobjpage) {
1308 		/* update rusage counters */
1309 		curproc->p_ru.ru_minflt++;
1310 		if (uobjpage != PGO_DONTCARE) {
1311 			uvm_lock_pageq();
1312 			uvm_pageactivate(uobjpage);
1313 			uvm_unlock_pageq();
1314 		}
1315 	} else {
1316 		error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1317 		if (error != 0)
1318 			return error;
1319 	}
1320 
1321 	/*
1322 	 * notes:
1323 	 *  - at this point uobjpage can not be NULL
1324 	 *  - at this point uobjpage could be PG_WANTED (handle later)
1325 	 */
1326 	if (flt->promote == FALSE) {
1327 		/*
1328 		 * we are not promoting.   if the mapping is COW ensure that we
1329 		 * don't give more access than we should (e.g. when doing a read
1330 		 * fault on a COPYONWRITE mapping we want to map the COW page in
1331 		 * R/O even though the entry protection could be R/W).
1332 		 *
1333 		 * set "pg" to the page we want to map in (uobjpage, usually)
1334 		 */
1335 		counters_inc(uvmexp_counters, flt_obj);
1336 		if (UVM_ET_ISCOPYONWRITE(ufi->entry))
1337 			flt->enter_prot &= ~PROT_WRITE;
1338 		pg = uobjpage;		/* map in the actual object */
1339 
1340 		/* assert(uobjpage != PGO_DONTCARE) */
1341 
1342 		/*
1343 		 * we are faulting directly on the page.
1344 		 */
1345 	} else {
1346 		KASSERT(amap != NULL);
1347 
1348 		/* promoting requires a write lock. */
1349 	        KASSERT(rw_write_held(amap->am_lock));
1350 	        KASSERT(uobj == NULL ||
1351 	            rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1352 
1353 		/*
1354 		 * if we are going to promote the data to an anon we
1355 		 * allocate a blank anon here and plug it into our amap.
1356 		 */
1357 		error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
1358 		if (error)
1359 			return error;
1360 
1361 		/*
1362 		 * fill in the data
1363 		 */
1364 		if (uobjpage != PGO_DONTCARE) {
1365 			counters_inc(uvmexp_counters, flt_prcopy);
1366 
1367 			/*
1368 			 * promote to shared amap?  make sure all sharing
1369 			 * procs see it
1370 			 */
1371 			if ((amap_flags(amap) & AMAP_SHARED) != 0) {
1372 				pmap_page_protect(uobjpage, PROT_NONE);
1373 			}
1374 #if defined(MULTIPROCESSOR) && !defined(__HAVE_PMAP_MPSAFE_ENTER_COW)
1375 			/*
1376 			 * Otherwise:
1377 			 * If there are multiple threads, either uvm or the
1378 			 * pmap has to make sure no threads see the old RO
1379 			 * mapping once any have seen the new RW mapping.
1380 			 * uvm does it here by forcing it to PROT_NONE before
1381 			 * inserting the new mapping.
1382 			 */
1383 			else if (P_HASSIBLING(curproc)) {
1384 				pmap_page_protect(uobjpage, PROT_NONE);
1385 			}
1386 #endif
1387 			/* done with copied uobjpage. */
1388 			rw_exit(uobj->vmobjlock);
1389 			uobj = NULL;
1390 		} else {
1391 			counters_inc(uvmexp_counters, flt_przero);
1392 			/*
1393 			 * Page is zero'd and marked dirty by uvm_pagealloc(),
1394 			 * called in uvmfault_promote() above.
1395 			 */
1396 		}
1397 
1398 		if (amap_add(&ufi->entry->aref,
1399 		    ufi->orig_rvaddr - ufi->entry->start, anon, 0)) {
1400 			if (pg->pg_flags & PG_WANTED)
1401 				wakeup(pg);
1402 
1403 			atomic_clearbits_int(&pg->pg_flags,
1404 			    PG_BUSY|PG_FAKE|PG_WANTED);
1405 			UVM_PAGE_OWN(pg, NULL);
1406 			uvmfault_unlockall(ufi, amap, uobj);
1407 			uvm_anfree(anon);
1408 			counters_inc(uvmexp_counters, flt_noamap);
1409 
1410 			if (uvm_swapisfull())
1411 				return (ENOMEM);
1412 
1413 			amap_populate(&ufi->entry->aref,
1414 			    ufi->orig_rvaddr - ufi->entry->start);
1415 			return ERESTART;
1416 		}
1417 	}
1418 
1419 	/*
1420 	 * anon must be write locked (promotion).  uobj can be either.
1421 	 *
1422 	 * Note: pg is either the uobjpage or the new page in the new anon.
1423 	 */
1424 	KASSERT(amap == NULL ||
1425 	    rw_write_held(amap->am_lock));
1426 	KASSERT(uobj == NULL ||
1427 	    rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1428 	KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
1429 
1430 	/*
1431 	 * all resources are present.   we can now map it in and free our
1432 	 * resources.
1433 	 */
1434 	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
1435 	    VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
1436 	    flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) {
1437 		/*
1438 		 * No need to undo what we did; we can simply think of
1439 		 * this as the pmap throwing away the mapping information.
1440 		 *
1441 		 * We do, however, have to go through the ReFault path,
1442 		 * as the map may change while we're asleep.
1443 		 */
1444 		if (pg->pg_flags & PG_WANTED)
1445 			wakeup(pg);
1446 
1447 		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
1448 		UVM_PAGE_OWN(pg, NULL);
1449 		uvmfault_unlockall(ufi, amap, uobj);
1450 		if (uvm_swapisfull()) {
1451 			/* XXX instrumentation */
1452 			return (ENOMEM);
1453 		}
1454 #ifdef __HAVE_PMAP_POPULATE
1455 		pmap_populate(ufi->orig_map->pmap, ufi->orig_rvaddr);
1456 #else
1457 		/* XXX instrumentation */
1458 		uvm_wait("flt_pmfail2");
1459 #endif
1460 		return ERESTART;
1461 	}
1462 
1463 	uvm_lock_pageq();
1464 	if (flt->wired) {
1465 		uvm_pagewire(pg);
1466 		if (pg->pg_flags & PQ_AOBJ) {
1467 			/*
1468 			 * since the now-wired page cannot be paged out,
1469 			 * release its swap resources for others to use.
1470 			 * since an aobj page with no swap cannot be clean,
1471 			 * mark it dirty now.
1472 			 *
1473 			 * use pg->uobject here.  if the page is from a
1474 			 * tmpfs vnode, the pages are backed by its UAO and
1475 			 * not the vnode.
1476 			 */
1477 			KASSERT(uobj != NULL);
1478 			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
1479 			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1480 			dropswap = 1;
1481 		}
1482 	} else {
1483 		uvm_pageactivate(pg);
1484 	}
1485 	uvm_unlock_pageq();
1486 
1487 	if (dropswap)
1488 		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
1489 
1490 	if (pg->pg_flags & PG_WANTED)
1491 		wakeup(pg);
1492 
1493 	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
1494 	UVM_PAGE_OWN(pg, NULL);
1495 	uvmfault_unlockall(ufi, amap, uobj);
1496 	pmap_update(ufi->orig_map->pmap);
1497 
1498 	return (0);
1499 }
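/*
 * examples of the promote decision made above: a write fault on a
 * MAP_PRIVATE (copy-on-write) file mapping promotes the object's page into
 * a fresh anon (case 2B), a read fault on the same mapping just enters the
 * object's page read-only (case 2A), and a fault on an entry with no
 * backing object is always a promoting zero fill.
 */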
1500 
1501 /*
1502  * uvm_fault_lower_io: get lower page from backing store.
1503  *
1504  *	1. unlock everything, because i/o will block.
1505  *	2. call pgo_get.
1506  *	3. if failed, recover.
1507  *	4. if succeeded, relock everything and verify things.
1508  */
1509 int
1510 uvm_fault_lower_io(
1511 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1512 	struct uvm_object **ruobj, struct vm_page **ruobjpage)
1513 {
1514 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1515 	struct uvm_object *uobj = *ruobj;
1516 	struct vm_page *pg;
1517 	boolean_t locked;
1518 	int gotpages, advice;
1519 	int result;
1520 	voff_t uoff;
1521 	vm_prot_t access_type;
1522 
1523 	/* grab everything we need from the entry before we unlock */
1524 	uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
1525 	access_type = flt->access_type & MASK(ufi->entry);
1526 	advice = ufi->entry->advice;
1527 
1528 	uvmfault_unlockall(ufi, amap, NULL);
1529 
1530 	/* update rusage counters */
1531 	curproc->p_ru.ru_majflt++;
1532 
1533 	KASSERT(rw_write_held(uobj->vmobjlock));
1534 
1535 	counters_inc(uvmexp_counters, flt_get);
1536 	gotpages = 1;
1537 	pg = NULL;
1538 	result = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
1539 	    0, access_type, advice, PGO_SYNCIO);
1540 
1541 	/*
1542 	 * recover from I/O
1543 	 */
1544 	if (result != VM_PAGER_OK) {
1545 		KASSERT(result != VM_PAGER_PEND);
1546 
1547 		if (result == VM_PAGER_AGAIN) {
1548 			tsleep_nsec(&nowake, PVM, "fltagain2", MSEC_TO_NSEC(5));
1549 			return ERESTART;
1550 		}
1551 
1552 		if (!UVM_ET_ISNOFAULT(ufi->entry))
1553 			return (EIO);
1554 
1555 		pg = PGO_DONTCARE;
1556 		uobj = NULL;
1557 		flt->promote = TRUE;
1558 	}
1559 
1560 	/* re-verify the state of the world.  */
1561 	locked = uvmfault_relock(ufi);
1562 	if (locked && amap != NULL)
1563 		amap_lock(amap, RW_WRITE);
1564 
1565 	/* might be changed */
1566 	if (pg != PGO_DONTCARE) {
1567 		uobj = pg->uobject;
1568 		rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1569 		KASSERT((pg->pg_flags & PG_BUSY) != 0);
1570 		KASSERT(flt->lower_lock_type == RW_WRITE);
1571 	}
1572 
1573 	/*
1574 	 * Re-verify that amap slot is still free. if there is
1575 	 * a problem, we clean up.
1576 	 */
1577 	if (locked && amap && amap_lookup(&ufi->entry->aref,
1578 	      ufi->orig_rvaddr - ufi->entry->start)) {
1579 		if (locked)
1580 			uvmfault_unlockall(ufi, amap, NULL);
1581 		locked = FALSE;
1582 	}
1583 
1584 	/* release the page now, still holding object lock */
1585 	if (pg != PGO_DONTCARE) {
1586 		uvm_lock_pageq();
1587 		uvm_pageactivate(pg);
1588 		uvm_unlock_pageq();
1589 
1590 		if (pg->pg_flags & PG_WANTED)
1591 			wakeup(pg);
1592 		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_WANTED);
1593 		UVM_PAGE_OWN(pg, NULL);
1594 	}
1595 
1596 	if (locked == FALSE) {
1597 		if (pg != PGO_DONTCARE)
1598 			rw_exit(uobj->vmobjlock);
1599 		return ERESTART;
1600 	}
1601 
1602 	/*
1603 	 * we have the data in pg.  we are holding object lock (so the page
1604 	 * can't be released on us).
1605 	 */
1606 	*ruobj = uobj;
1607 	*ruobjpage = pg;
1608 	return 0;
1609 }
1610 
1611 /*
1612  * uvm_fault_wire: wire down a range of virtual addresses in a map.
1613  *
1614  * => map may be read-locked by caller, but MUST NOT be write-locked.
1615  * => if map is read-locked, any operations which may cause map to
1616  *	be write-locked in uvm_fault() must be taken care of by
1617  *	the caller.  See uvm_map_pageable().
1618  */
1619 int
1620 uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
1621 {
1622 	vaddr_t va;
1623 	int rv;
1624 
1625 	/*
1626 	 * now fault it in a page at a time.   if the fault fails then we have
1627 	 * to undo what we have done.   note that in uvm_fault PROT_NONE
1628 	 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
1629 	 */
1630 	for (va = start ; va < end ; va += PAGE_SIZE) {
1631 		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
1632 		if (rv) {
1633 			if (va != start) {
1634 				uvm_fault_unwire(map, start, va);
1635 			}
1636 			return (rv);
1637 		}
1638 	}
1639 
1640 	return (0);
1641 }
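/*
 * sketch of the usual wire/unwire pairing (roughly what the mlock(2) /
 * uvm_map_pageable() path in uvm_map.c does; the protection shown is just
 * an example):
 *
 *	error = uvm_fault_wire(map, start, end, PROT_READ | PROT_WRITE);
 *	if (error)
 *		... region was left unwired, propagate the error ...
 *	... use the wired region ...
 *	uvm_fault_unwire(map, start, end);
 */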
1642 
1643 /*
1644  * uvm_fault_unwire(): unwire range of virtual space.
1645  */
1646 void
1647 uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
1648 {
1649 
1650 	vm_map_lock_read(map);
1651 	uvm_fault_unwire_locked(map, start, end);
1652 	vm_map_unlock_read(map);
1653 }
1654 
1655 /*
1656  * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
1657  *
1658  * => map must be at least read-locked.
1659  */
1660 void
1661 uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
1662 {
1663 	vm_map_entry_t entry, oentry = NULL, next;
1664 	pmap_t pmap = vm_map_pmap(map);
1665 	vaddr_t va;
1666 	paddr_t pa;
1667 	struct vm_page *pg;
1668 
1669 	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1670 	vm_map_assert_anylock(map);
1671 
1672 	/*
1673 	 * we assume that the area we are unwiring has actually been wired
1674 	 * in the first place.   this means that we should be able to extract
1675 	 * the PAs from the pmap.
1676 	 */
1677 
1678 	/*
1679 	 * find the beginning map entry for the region.
1680 	 */
1681 	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
1682 	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
1683 		panic("uvm_fault_unwire_locked: address not in map");
1684 
1685 	for (va = start; va < end ; va += PAGE_SIZE) {
1686 		/*
1687 		 * find the map entry for the current address.
1688 		 */
1689 		KASSERT(va >= entry->start);
1690 		while (entry && va >= entry->end) {
1691 			next = RBT_NEXT(uvm_map_addr, entry);
1692 			entry = next;
1693 		}
1694 
1695 		if (entry == NULL)
1696 			return;
1697 		if (va < entry->start)
1698 			continue;
1699 
1700 		/*
1701 		 * lock it.
1702 		 */
1703 		if (entry != oentry) {
1704 			if (oentry != NULL) {
1705 				uvm_map_unlock_entry(oentry);
1706 			}
1707 			uvm_map_lock_entry(entry);
1708 			oentry = entry;
1709 		}
1710 
1711 		if (!pmap_extract(pmap, va, &pa))
1712 			continue;
1713 
1714 		/*
1715 		 * if the entry is no longer wired, tell the pmap.
1716 		 */
1717 		if (VM_MAPENT_ISWIRED(entry) == 0)
1718 			pmap_unwire(pmap, va);
1719 
1720 		pg = PHYS_TO_VM_PAGE(pa);
1721 		if (pg) {
1722 			uvm_lock_pageq();
1723 			uvm_pageunwire(pg);
1724 			uvm_unlock_pageq();
1725 		}
1726 	}
1727 
1728 	if (oentry != NULL) {
1729 		uvm_map_unlock_entry(oentry);
1730 	}
1731 }
1732 
1733 /*
1734  * uvmfault_unlockmaps: unlock the maps
1735  */
1736 void
1737 uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
1738 {
1739 	/*
1740 	 * ufi can be NULL when this isn't really a fault,
1741 	 * but merely paging in anon data.
1742 	 */
1743 	if (ufi == NULL) {
1744 		return;
1745 	}
1746 
1747 	uvmfault_update_stats(ufi);
1748 	if (write_locked) {
1749 		vm_map_unlock(ufi->map);
1750 	} else {
1751 		vm_map_unlock_read(ufi->map);
1752 	}
1753 }
1754 
1755 /*
1756  * uvmfault_unlockall: unlock everything passed in.
1757  *
1758  * => maps must be read-locked (not write-locked).
1759  */
1760 void
1761 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
1762     struct uvm_object *uobj)
1763 {
1764 	if (uobj)
1765 		rw_exit(uobj->vmobjlock);
1766 	if (amap != NULL)
1767 		amap_unlock(amap);
1768 	uvmfault_unlockmaps(ufi, FALSE);
1769 }
1770 
1771 /*
1772  * uvmfault_lookup: lookup a virtual address in a map
1773  *
1774  * => caller must provide a uvm_faultinfo structure with the IN
1775  *	params properly filled in
1776  * => we will lookup the map entry (handling submaps) as we go
1777  * => if the lookup is a success we will return with the maps locked
1778  * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
1779  *	get a read lock.
1780  * => note that submaps can only appear in the kernel and they are
1781  *	required to use the same virtual addresses as the map they
1782  *	are referenced by (thus address translation between the main
1783  *	map and the submap is unnecessary).
1784  */
1785 
1786 boolean_t
1787 uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
1788 {
1789 	vm_map_t tmpmap;
1790 
1791 	/*
1792 	 * init ufi values for lookup.
1793 	 */
1794 	ufi->map = ufi->orig_map;
1795 	ufi->size = ufi->orig_size;
1796 
1797 	/*
1798 	 * keep going down levels until we are done.   note that there can
1799 	 * only be two levels so we won't loop very long.
1800 	 */
1801 	while (1) {
1802 		if (ufi->orig_rvaddr < ufi->map->min_offset ||
1803 		    ufi->orig_rvaddr >= ufi->map->max_offset)
1804 			return FALSE;
1805 
1806 		/* lock map */
1807 		if (write_lock) {
1808 			vm_map_lock(ufi->map);
1809 		} else {
1810 			vm_map_lock_read(ufi->map);
1811 		}
1812 
1813 		/* lookup */
1814 		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
1815 		    &ufi->entry)) {
1816 			uvmfault_unlockmaps(ufi, write_lock);
1817 			return FALSE;
1818 		}
1819 
1820 		/* reduce size if necessary */
1821 		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
1822 			ufi->size = ufi->entry->end - ufi->orig_rvaddr;
1823 
1824 		/*
1825 		 * submap?    replace map with the submap and lookup again.
1826 		 * note: VAs in submaps must match VAs in main map.
1827 		 */
1828 		if (UVM_ET_ISSUBMAP(ufi->entry)) {
1829 			tmpmap = ufi->entry->object.sub_map;
1830 			uvmfault_unlockmaps(ufi, write_lock);
1831 			ufi->map = tmpmap;
1832 			continue;
1833 		}
1834 
1835 		/*
1836 		 * got it!
1837 		 */
1838 		ufi->mapv = ufi->map->timestamp;
1839 		return TRUE;
1840 
1841 	}	/* while loop */
1842 
1843 	/*NOTREACHED*/
1844 }
1845 
1846 /*
1847  * uvmfault_relock: attempt to relock the same version of the map
1848  *
1849  * => fault data structures should be unlocked before calling.
1850  * => if a success (TRUE) maps will be locked after call.
1851  */
1852 boolean_t
1853 uvmfault_relock(struct uvm_faultinfo *ufi)
1854 {
1855 	/*
1856 	 * ufi can be NULL when this isn't really a fault,
1857 	 * but merely paging in anon data.
1858 	 */
1859 	if (ufi == NULL) {
1860 		return TRUE;
1861 	}
1862 
1863 	counters_inc(uvmexp_counters, flt_relck);
1864 
1865 	/*
1866 	 * relock map.   fail if version mismatch (in which case nothing
1867 	 * gets locked).
1868 	 */
1869 	vm_map_lock_read(ufi->map);
1870 	if (ufi->mapv != ufi->map->timestamp) {
1871 		vm_map_unlock_read(ufi->map);
1872 		return FALSE;
1873 	}
1874 
1875 	counters_inc(uvmexp_counters, flt_relckok);
1876 	return TRUE;		/* got it! */
1877 }
1878