xref: /netbsd-src/sys/uvm/uvm_pdaemon.c (revision dc306354b0b29af51801a7632f1e95265a68cd81)
1 /*	$NetBSD: uvm_pdaemon.c,v 1.12 1998/11/04 07:06:05 chs Exp $	*/
2 
3 /*
4  * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
5  *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
6  */
7 /*
8  * Copyright (c) 1997 Charles D. Cranor and Washington University.
9  * Copyright (c) 1991, 1993, The Regents of the University of California.
10  *
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by Charles D. Cranor,
27  *      Washington University, the University of California, Berkeley and
28  *      its contributors.
29  * 4. Neither the name of the University nor the names of its contributors
30  *    may be used to endorse or promote products derived from this software
31  *    without specific prior written permission.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43  * SUCH DAMAGE.
44  *
45  *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
46  * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
47  *
48  *
49  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
50  * All rights reserved.
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72 
73 #include "opt_uvmhist.h"
74 
75 /*
76  * uvm_pdaemon.c: the page daemon
77  */
78 
79 #include <sys/param.h>
80 #include <sys/proc.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/pool.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_kern.h>
88 
89 #include <uvm/uvm.h>
90 
91 /*
92  * local prototypes
93  */
94 
95 static void		uvmpd_scan __P((void));
96 static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
97 static void		uvmpd_tune __P((void));
98 
99 
100 /*
101  * uvm_wait: wait (sleep) for the page daemon to free some pages
102  *
103  * => should be called with all locks released
104  * => should _not_ be called by the page daemon (to avoid deadlock)
105  */
106 
107 void uvm_wait(wmsg)
108 	char *wmsg;
109 {
110 	int timo = 0;
111 	int s = splbio();
112 
113 	/*
114 	 * check for page daemon going to sleep (waiting for itself)
115 	 */
116 
117 	if (curproc == uvm.pagedaemon_proc) {
118 		/*
119 		 * now we have a problem: the pagedaemon wants to go to
120 		 * sleep until it frees more memory.   but how can it
121 		 * free more memory if it is asleep?  that is a deadlock.
122 		 * we have two options:
123 		 *  [1] panic now
124 		 *  [2] put a timeout on the sleep, thus causing the
125 		 *      pagedaemon to only pause (rather than sleep forever)
126 		 *
127 		 * note that option [2] will only help us if we get lucky
128 		 * and some other process on the system breaks the deadlock
129 		 * by exiting or freeing memory (thus allowing the pagedaemon
130 		 * to continue).  for now we panic if DEBUG is defined,
131 		 * otherwise we hope for the best with option [2] (better
132 		 * yet, this should never happen in the first place!).
133 		 */
134 
135 		printf("pagedaemon: deadlock detected!\n");
136 		timo = hz >> 3;		/* set timeout */
137 #if defined(DEBUG)
138 		/* DEBUG: panic so we can debug it */
139 		panic("pagedaemon deadlock");
140 #endif
141 	}
142 
143 	simple_lock(&uvm.pagedaemon_lock);
144 	thread_wakeup(&uvm.pagedaemon);		/* wake the daemon! */
145 	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
146 	    timo);
147 
148 	splx(s);
149 }
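
/*
 * A minimal caller sketch (illustrative only, not part of this file;
 * "my_alloc_page" stands in for whatever allocation the caller just
 * failed to satisfy):
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = my_alloc_page()) == NULL)
 *		uvm_wait("my_alloc_page");
 *
 * as noted above, the caller must hold no locks and must not be the
 * pagedaemon itself.
 */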
150 
151 
152 /*
153  * uvmpd_tune: tune paging parameters
154  *
155  * => called whenever memory is added to (or removed from?) the system
156  * => caller must call with page queues locked
157  */
158 
159 static void
160 uvmpd_tune()
161 {
162 	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
163 
164 	uvmexp.freemin = uvmexp.npages / 20;
165 
166 	/* between 16k and 256k */
167 	/* XXX:  what are these values good for? */
168 	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
169 	uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);
170 
171 	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
172 	if (uvmexp.freetarg <= uvmexp.freemin)
173 		uvmexp.freetarg = uvmexp.freemin + 1;
174 
175 	/* uvmexp.inactarg: computed in main daemon loop */
176 
177 	uvmexp.wiredmax = uvmexp.npages / 3;
178 	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
179 	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
180 }
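
/*
 * Worked example of the tuning above (a sketch assuming 4KB pages,
 * i.e. PAGE_SHIFT == 12, and about 32MB of managed memory, so
 * npages == 8192):
 *
 *	freemin  = 8192 / 20 = 409, clamped to the 4..64 page range
 *		   above, giving freemin == 64 (256KB)
 *	freetarg = (64 * 4) / 3 = 85 (already > freemin, so kept)
 *	wiredmax = 8192 / 3 = 2730
 *
 * uvmexp.inactarg is not set here; the main loop in uvm_pageout()
 * recomputes it as (active + inactive) / 3, floored at freetarg + 1.
 */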
181 
182 /*
183  * uvm_pageout: the main loop for the pagedaemon
184  */
185 
186 void
187 uvm_pageout()
188 {
189 	int npages = 0;
190 	int s;
191 	struct uvm_aiodesc *aio, *nextaio;
192 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
193 
194 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
195 
196 	/*
197 	 * ensure correct priority and set paging parameters...
198 	 */
199 
200 	uvm.pagedaemon_proc = curproc;
201 	(void) spl0();
202 	uvm_lock_pageq();
203 	npages = uvmexp.npages;
204 	uvmpd_tune();
205 	uvm_unlock_pageq();
206 
207 	/*
208 	 * main loop
209 	 */
210 	while (TRUE) {
211 
212 		/*
213 		 * carefully attempt to go to sleep (without losing "wakeups"!).
214 		 * we need splbio because we want to make sure the aio_done list
215 		 * is totally empty before we go to sleep.
216 		 */
217 
218 		s = splbio();
219 		simple_lock(&uvm.pagedaemon_lock);
220 
221 		/*
222 		 * if we've got done aio's, then bypass the sleep
223 		 */
224 
225 		if (uvm.aio_done.tqh_first == NULL) {
226 			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
227 			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
228 			    &uvm.pagedaemon_lock, FALSE, "daemon_slp", 0);
229 			uvmexp.pdwoke++;
230 			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
231 
232 			/* relock pagedaemon_lock, still at splbio */
233 			simple_lock(&uvm.pagedaemon_lock);
234 		}
235 
236 		/*
237 		 * check for done aio structures
238 		 */
239 
240 		aio = uvm.aio_done.tqh_first;	/* save current list (if any)*/
241 		if (aio) {
242 			TAILQ_INIT(&uvm.aio_done);	/* zero global list */
243 		}
244 
245 		simple_unlock(&uvm.pagedaemon_lock);	/* unlock */
246 		splx(s);				/* drop splbio */
247 
248 		/*
249 		 * first clear out any pending aios (to free space in case we
250 		 * want to pageout more stuff).
251 		 */
252 
253 		for (/*null*/; aio != NULL ; aio = nextaio) {
254 
255 			uvmexp.paging -= aio->npages;
256 			nextaio = aio->aioq.tqe_next;
257 			aio->aiodone(aio);
258 
259 		}
260 
261 		/* Next, drain pool resources */
262 		pool_drain(0);
263 
264 		/*
265 		 * now lock page queues and recompute inactive count
266 		 */
267 		uvm_lock_pageq();
268 
269 		if (npages != uvmexp.npages) {	/* check for new pages? */
270 			npages = uvmexp.npages;
271 			uvmpd_tune();
272 		}
273 
274 		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
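		/* target ~1/3 of (active + inactive) pages on the inactive
		 * list, but never fewer than freetarg + 1 */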
275 		if (uvmexp.inactarg <= uvmexp.freetarg)
276 			uvmexp.inactarg = uvmexp.freetarg + 1;
277 
278 		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
279 		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
280 		    uvmexp.inactarg);
281 
282 		/*
283 		 * scan if needed
284 		 * [XXX: note we are reading uvmexp.free without locking]
285 		 */
286 		if (uvmexp.free < uvmexp.freetarg ||
287 		    uvmexp.inactive < uvmexp.inactarg)
288 			uvmpd_scan();
289 
290 		/*
291 		 * done scan.  unlock page queues (the only lock we are holding)
292 		 */
293 		uvm_unlock_pageq();
294 
295 		/*
296 		 * done!    restart loop.
297 		 */
298 		thread_wakeup(&uvmexp.free);
299 	}
300 	/*NOTREACHED*/
301 }
302 
303 /*
304  * uvmpd_scan_inactive: the first loop of uvmpd_scan broken out into
305  * 	its own function for ease of reading.
306  *
307  * => called with page queues locked
308  * => we work on meeting our free target by converting inactive pages
309  *    into free pages.
310  * => we handle the building of swap-backed clusters
311  * => we return TRUE if we are exiting because we met our target
312  */
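
/*
 * Usage within this file (see uvmpd_scan() below): this is called with
 * the page queues locked, on one of the two inactive lists, e.g.
 *
 *	got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
 *	if (!got_it)
 *		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
 */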
313 
314 static boolean_t
315 uvmpd_scan_inactive(pglst)
316 	struct pglist *pglst;
317 {
318 	boolean_t retval = FALSE;	/* assume we haven't hit target */
319 	int s, free, result;
320 	struct vm_page *p, *nextpg;
321 	struct uvm_object *uobj;
322 	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
323 	int npages;
324 	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
325 	int swnpages, swcpages;				/* XXX: see below */
326 	int swslot, oldslot;
327 	struct vm_anon *anon;
328 	boolean_t swap_backed;
329 	vaddr_t start;
330 	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
331 
332 	/*
333 	 * note: we currently keep swap-backed pages on a separate inactive
334 	 * list from object-backed pages.   however, merging the two lists
335 	 * back together again hasn't been ruled out.   thus, we keep our
336 	 * swap cluster in "swpps" rather than in pps (allows us to mix
337 	 * clustering types in the event of a mixed inactive queue).
338 	 */
339 
340 	/*
341 	 * swslot is non-zero if we are building a swap cluster.  we want
342 	 * to stay in the loop while we have a page to scan or we have
343 	 * a swap-cluster to build.
344 	 */
345 	swslot = 0;
346 	swnpages = swcpages = 0;
347 	free = 0;
348 
349 	for (p = pglst->tqh_first ; p != NULL || swslot != 0 ; p = nextpg) {
350 
351 		/*
352 		 * note that p can be NULL iff we have traversed the whole
353 		 * list and need to do one final swap-backed clustered pageout.
354 		 */
355 		if (p) {
356 			/*
357 			 * update our copy of "free" and see if we've met
358 			 * our target
359 			 */
360 			s = splimp();
361 			uvm_lock_fpageq();
362 			free = uvmexp.free;
363 			uvm_unlock_fpageq();
364 			splx(s);
365 
366 			if (free >= uvmexp.freetarg) {
367 				UVMHIST_LOG(pdhist,"  met free target: "
368 				    "exit loop", 0, 0, 0, 0);
369 				retval = TRUE;		/* hit the target! */
370 
371 				if (swslot == 0)
372 					/* exit now if no swap-i/o pending */
373 					break;
374 
375 				/* set p to null to signal final swap i/o */
376 				p = NULL;
377 			}
378 		}
379 
380 		uobj = NULL;	/* be safe and shut gcc up */
381 		anon = NULL;	/* be safe and shut gcc up */
382 
383 		if (p) {	/* if (we have a new page to consider) */
384 			/*
385 			 * we are below target and have a new page to consider.
386 			 */
387 			uvmexp.pdscans++;
388 			nextpg = p->pageq.tqe_next;
389 
390 			/*
391 			 * move referenced pages back to active queue and
392 			 * skip to next page (unlikely to happen since
393 			 * inactive pages shouldn't have any valid mappings
394 			 * and we cleared reference before deactivating).
395 			 */
396 			if (pmap_is_referenced(PMAP_PGARG(p))) {
397 				uvm_pageactivate(p);
398 				uvmexp.pdreact++;
399 				continue;
400 			}
401 
402 			/*
403 			 * first we attempt to lock the object that this page
404 			 * belongs to.  if our attempt fails we skip on to
405 			 * the next page (no harm done).  it is important to
406 			 * "try" locking the object as we are locking in the
407 			 * wrong order (pageq -> object) and we don't want to
408 			 * get deadlocked.
409 			 *
410 			 * the only time we expect to see an ownerless page
411 			 * (i.e. a page with no uobject and !PQ_ANON) is if an
412 			 * anon has loaned a page from a uvm_object and the
413 			 * uvm_object has dropped the ownership.  in that
414 			 * case, the anon can "take over" the loaned page
415 			 * and make it its own.
416 			 */
417 
418 			/* is page part of an anon or ownerless ? */
419 			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
420 
421 				anon = p->uanon;
422 
423 #ifdef DIAGNOSTIC
424 				/* to be on inactive q, page must be part
425 				 * of _something_ */
426 				if (anon == NULL)
427 					panic("pagedaemon: page with no anon "
428 					    "or object detected - loop 1");
429 #endif
430 
431 				if (!simple_lock_try(&anon->an_lock))
432 					/* lock failed, skip this page */
433 					continue;
434 
435 				/*
436 				 * if the page is ownerless, claim it in the
437 				 * name of "anon"!
438 				 */
439 				if ((p->pqflags & PQ_ANON) == 0) {
440 #ifdef DIAGNOSTIC
441 					if (p->loan_count < 1)
442 						panic("pagedaemon: non-loaned "
443 						    "ownerless page detected -"
444 						    " loop 1");
445 #endif
446 					p->loan_count--;
447 					p->pqflags |= PQ_ANON;      /* anon now owns it */
448 				}
449 
450 				if (p->flags & PG_BUSY) {
451 					simple_unlock(&anon->an_lock);
452 					uvmexp.pdbusy++;
453 					/* someone else owns page, skip it */
454 					continue;
455 				}
456 
457 				uvmexp.pdanscan++;
458 
459 			} else {
460 
461 				uobj = p->uobject;
462 
463 				if (!simple_lock_try(&uobj->vmobjlock))
464 					/* lock failed, skip this page */
465 					continue;
466 
467 				if (p->flags & PG_BUSY) {
468 					simple_unlock(&uobj->vmobjlock);
469 					uvmexp.pdbusy++;
470 					/* someone else owns page, skip it */
471 					continue;
472 				}
473 
474 				uvmexp.pdobscan++;
475 			}
476 
477 			/*
478 			 * we now have the object and the page queues locked.
479 			 * the page is not busy.   if the page is clean we
480 			 * can free it now and continue.
481 			 */
482 
483 			if (p->flags & PG_CLEAN) {
484 				/* zap all mappings with pmap_page_protect... */
485 				pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
486 				uvm_pagefree(p);
487 				uvmexp.pdfreed++;
488 
489 				if (anon) {
490 #ifdef DIAGNOSTIC
491 					/*
492 					 * an anonymous page can only be clean
493 					 * if it has valid backing store.
494 					 */
495 					if (anon->an_swslot == 0)
496 						panic("pagedaemon: clean anon "
497 						 "page without backing store?");
498 #endif
499 					/* remove from object */
500 					anon->u.an_page = NULL;
501 					simple_unlock(&anon->an_lock);
502 				} else {
503 					/* pagefree has already removed the
504 					 * page from the object */
505 					simple_unlock(&uobj->vmobjlock);
506 				}
507 				continue;
508 			}
509 
510 			/*
511 			 * this page is dirty, skip it if we'll have met our
512 			 * free target when all the current pageouts complete.
513 			 */
514 			if (free + uvmexp.paging > uvmexp.freetarg)
515 			{
516 				if (anon) {
517 					simple_unlock(&anon->an_lock);
518 				} else {
519 					simple_unlock(&uobj->vmobjlock);
520 				}
521 				continue;
522 			}
523 
524 			/*
525 			 * the page we are looking at is dirty.   we must
526 			 * clean it before it can be freed.  to do this we
527 			 * first mark the page busy so that no one else will
528 			 * touch the page.   we write protect all the mappings
529 			 * of the page so that no one touches it while it is
530 			 * in I/O.
531 			 */
532 
533 			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
534 			p->flags |= PG_BUSY;		/* now we own it */
535 			UVM_PAGE_OWN(p, "scan_inactive");
536 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ);
537 			uvmexp.pgswapout++;
538 
539 			/*
540 			 * for swap-backed pages we need to (re)allocate
541 			 * swap space.
542 			 */
543 			if (swap_backed) {
544 
545 				/*
546 				 * free old swap slot (if any)
547 				 */
548 				if (anon) {
549 					if (anon->an_swslot) {
550 						uvm_swap_free(anon->an_swslot,
551 						    1);
552 						anon->an_swslot = 0;
553 					}
554 				} else {
555 					oldslot = uao_set_swslot(uobj,
556 					    p->offset >> PAGE_SHIFT, 0);
557 
558 					if (oldslot)
559 						uvm_swap_free(oldslot, 1);
560 				}
561 
562 				/*
563 				 * start new cluster (if necessary)
564 				 */
565 				if (swslot == 0) {
566 					/* want this much */
567 					swnpages = MAXBSIZE >> PAGE_SHIFT;
568 
569 					swslot = uvm_swap_alloc(&swnpages,
570 					    TRUE);
571 
572 					if (swslot == 0) {
573 						/* no swap?  give up! */
574 						p->flags &= ~PG_BUSY;
575 						UVM_PAGE_OWN(p, NULL);
576 						if (anon)
577 							simple_unlock(
578 							    &anon->an_lock);
579 						else
580 							simple_unlock(
581 							    &uobj->vmobjlock);
582 						continue;
583 					}
584 					swcpages = 0;	/* cluster is empty */
585 				}
586 
587 				/*
588 				 * add block to cluster
589 				 */
590 				swpps[swcpages] = p;
591 				uvmexp.pgswapout++;
592 				if (anon)
593 					anon->an_swslot = swslot + swcpages;
594 				else
595 					uao_set_swslot(uobj,
596 					    p->offset >> PAGE_SHIFT,
597 					    swslot + swcpages);
598 				swcpages++;
599 
600 				/* done (swap-backed) */
601 			}
602 
603 			/* end: if (p) ["if we have new page to consider"] */
604 		} else {
605 
606 			/* if p == NULL we must be doing a last swap i/o */
607 			swap_backed = TRUE;
608 		}
609 
610 		/*
611 		 * now consider doing the pageout.
612 		 *
613 		 * for swap-backed pages, we do the pageout if we have either
614 		 * filled the cluster (in which case swnpages == swcpages) or
615 		 * run out of pages (p == NULL).
616 		 *
617 		 * for object pages, we always do the pageout.
618 		 */
619 		if (swap_backed) {
620 
621 			if (p) {	/* if we just added a page to cluster */
622 				if (anon)
623 					simple_unlock(&anon->an_lock);
624 				else
625 					simple_unlock(&uobj->vmobjlock);
626 
627 				/* cluster not full yet? */
628 				if (swcpages < swnpages)
629 					continue;
630 			}
631 
632 			/* starting I/O now... set up for it */
633 			npages = swcpages;
634 			ppsp = swpps;
635 			/* for swap-backed pages only */
636 			start = (vaddr_t) swslot;
637 
638 			/* if this is final pageout we could have a few
639 			 * extra swap blocks */
640 			if (swcpages < swnpages) {
641 				uvm_swap_free(swslot + swcpages,
642 				    (swnpages - swcpages));
643 			}
644 
645 		} else {
646 
647 			/* normal object pageout */
648 			ppsp = pps;
649 			npages = sizeof(pps) / sizeof(struct vm_page *);
650 			/* not looked at because PGO_ALLPAGES is set */
651 			start = 0;
652 
653 		}
654 
655 		/*
656 		 * now do the pageout.
657 		 *
658 		 * for swap_backed pages we have already built the cluster.
659 		 * for !swap_backed pages, uvm_pager_put will call the object's
660 		 * "make put cluster" function to build a cluster on our behalf.
661 		 *
662 		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
663 		 * it to free the cluster pages for us on a successful I/O (it
664 		 * always does this for un-successful I/O requests).  this
665 		 * allows us to do clustered pageout without having to deal
666 		 * with cluster pages at this level.
667 		 *
668 		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
669 		 *  IN: locked: uobj (if !swap_backed), page queues
670 		 * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
671 		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
672 		 *
673 		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
674 		 */
675 
676 		/* locked: uobj (if !swap_backed), page queues */
677 		uvmexp.pdpageouts++;
678 		result = uvm_pager_put((swap_backed) ? NULL : uobj, p,
679 		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
680 		/* locked: uobj (if !swap_backed && result != PEND) */
681 		/* unlocked: pageqs, object (if swap_backed ||result == PEND) */
682 
683 		/*
684 		 * if we did i/o to swap, zero swslot to indicate that we are
685 		 * no longer building a swap-backed cluster.
686 		 */
687 
688 		if (swap_backed)
689 			swslot = 0;		/* done with this cluster */
690 
691 		/*
692 		 * first, we check for VM_PAGER_PEND which means that the
693 		 * async I/O is in progress and the async I/O done routine
694 		 * will clean up after us.   in this case we move on to the
695 		 * next page.
696 		 *
697 		 * there is a very remote chance that the pending async i/o can
698 		 * finish _before_ we get here.   if that happens, our page "p"
699 		 * may no longer be on the inactive queue.   so we verify this
700 		 * when determining the next page (starting over at the head if
701 		 * we've lost our inactive page).
702 		 */
703 
704 		if (result == VM_PAGER_PEND) {
705 			uvmexp.paging += npages;
706 			uvm_lock_pageq();		/* relock page queues */
707 			uvmexp.pdpending++;
708 			if (p) {
709 				if (p->pqflags & PQ_INACTIVE)
710 					/* reload! */
711 					nextpg = p->pageq.tqe_next;
712 				else
713 					/* reload! */
714 					nextpg = pglst->tqh_first;
715 			} else {
716 				nextpg = NULL;		/* done list */
717 			}
718 			continue;
719 		}
720 
721 		/*
722 		 * clean up "p" if we have one
723 		 */
724 
725 		if (p) {
726 			/*
727 			 * the I/O request to "p" is done and uvm_pager_put
728 			 * has freed any cluster pages it may have allocated
729 			 * during I/O.  all that is left for us to do is
730 			 * clean up page "p" (which is still PG_BUSY).
731 			 *
732 			 * our result could be one of the following:
733 			 *   VM_PAGER_OK: successful pageout
734 			 *
735 			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
736 			 *     to next page
737 			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
738 			 *     "reactivate" page to get it out of the way (it
739 			 *     will eventually drift back into the inactive
740 			 *     queue for a retry).
741 			 *   VM_PAGER_UNLOCK: should never see this as it is
742 			 *     only valid for "get" operations
743 			 */
744 
745 			/* relock p's object: page queues not locked yet, so
746 			 * no need for "try" */
747 
748 			/* !swap_backed case: already locked... */
749 			if (swap_backed) {
750 				if (anon)
751 					simple_lock(&anon->an_lock);
752 				else
753 					simple_lock(&uobj->vmobjlock);
754 			}
755 
756 #ifdef DIAGNOSTIC
757 			if (result == VM_PAGER_UNLOCK)
758 				panic("pagedaemon: pageout returned "
759 				    "invalid 'unlock' code");
760 #endif
761 
762 			/* handle PG_WANTED now */
763 			if (p->flags & PG_WANTED)
764 				/* still holding object lock */
765 				thread_wakeup(p);
766 
767 			p->flags &= ~(PG_BUSY|PG_WANTED);
768 			UVM_PAGE_OWN(p, NULL);
769 
770 			/* released during I/O? */
771 			if (p->flags & PG_RELEASED) {
772 				if (anon) {
773 					/* remove page so we can get nextpg */
774 					anon->u.an_page = NULL;
775 
776 					simple_unlock(&anon->an_lock);
777 					uvm_anfree(anon);	/* kills anon */
778 					pmap_page_protect(PMAP_PGARG(p),
779 					    VM_PROT_NONE);
780 					anon = NULL;
781 					uvm_lock_pageq();
782 					nextpg = p->pageq.tqe_next;
783 					/* free released page */
784 					uvm_pagefree(p);
785 
786 				} else {
787 
788 #ifdef DIAGNOSTIC
789 					if (uobj->pgops->pgo_releasepg == NULL)
790 						panic("pagedaemon: no "
791 						   "pgo_releasepg function");
792 #endif
793 
794 					/*
795 					 * pgo_releasepg nukes the page and
796 					 * gets "nextpg" for us.  it returns
797 					 * with the page queues locked (when
798 					 * given nextpg ptr).
799 					 */
800 					if (!uobj->pgops->pgo_releasepg(p,
801 					    &nextpg))
802 						/* uobj died after release */
803 						uobj = NULL;
804 
805 					/*
806 					 * lock page queues here so that they're
807 					 * always locked at the end of the loop.
808 					 */
809 					uvm_lock_pageq();
810 				}
811 
812 			} else {	/* page was not released during I/O */
813 
814 				uvm_lock_pageq();
815 				nextpg = p->pageq.tqe_next;
816 
817 				if (result != VM_PAGER_OK) {
818 
819 					/* pageout was a failure... */
820 					if (result != VM_PAGER_AGAIN)
821 						uvm_pageactivate(p);
822 					pmap_clear_reference(PMAP_PGARG(p));
823 					/* XXXCDC: if (swap_backed) FREE p's
824 					 * swap block? */
825 
826 				} else {
827 
828 					/* pageout was a success... */
829 					pmap_clear_reference(PMAP_PGARG(p));
830 					pmap_clear_modify(PMAP_PGARG(p));
831 					p->flags |= PG_CLEAN;
832 					/* XXX: could free page here, but old
833 					 * pagedaemon does not */
834 
835 				}
836 			}
837 
838 			/*
839 			 * drop object lock (if there is an object left).   do
840 			 * a safety check of nextpg to make sure it is on the
841 			 * inactive queue (it should be since PG_BUSY pages on
842 			 * the inactive queue can't be re-queued [note: not
843 			 * true for active queue]).
844 			 */
845 
846 			if (anon)
847 				simple_unlock(&anon->an_lock);
848 			else if (uobj)
849 				simple_unlock(&uobj->vmobjlock);
850 
851 		} /* if (p) */ else {
852 
853 			/* if p is null in this loop, make sure it stays null
854 			 * in next loop */
855 			nextpg = NULL;
856 
857 			/*
858 			 * lock page queues here just so they're always locked
859 			 * at the end of the loop.
860 			 */
861 			uvm_lock_pageq();
862 		}
863 
864 		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
865 			printf("pagedaemon: invalid nextpg!   reverting to "
866 			    "queue head\n");
867 			nextpg = pglst->tqh_first;	/* reload! */
868 		}
869 
870 	}	/* end of "inactive" 'for' loop */
871 	return (retval);
872 }
873 
874 /*
875  * uvmpd_scan: scan the page queues and attempt to meet our targets.
876  *
877  * => called with pageq's locked
878  */
879 
880 void
881 uvmpd_scan()
882 {
883 	int s, free, pages_freed, page_shortage;
884 	struct vm_page *p, *nextpg;
885 	struct uvm_object *uobj;
886 	boolean_t got_it;
887 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
888 
889 	uvmexp.pdrevs++;		/* counter */
890 
891 #ifdef __GNUC__
892 	uobj = NULL;	/* XXX gcc */
893 #endif
894 	/*
895 	 * get current "free" page count
896 	 */
897 	s = splimp();
898 	uvm_lock_fpageq();
899 	free = uvmexp.free;
900 	uvm_unlock_fpageq();
901 	splx(s);
902 
903 #ifndef __SWAP_BROKEN
904 	/*
905 	 * swap out some processes if we are below our free target.
906 	 * we need to unlock the page queues for this.
907 	 */
908 	if (free < uvmexp.freetarg) {
909 
910 		uvmexp.pdswout++;
911 		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
912 		    uvmexp.freetarg, 0, 0);
913 		uvm_unlock_pageq();
914 		uvm_swapout_threads();
915 		pmap_update();		/* update so we can scan inactive q */
916 		uvm_lock_pageq();
917 
918 	}
919 #endif
920 
921 	/*
922 	 * now we want to work on meeting our targets.   first we work on our
923 	 * free target by converting inactive pages into free pages.  then
924 	 * we work on meeting our inactive target by converting active pages
925 	 * to inactive ones.
926 	 */
927 
928 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
929 	pages_freed = uvmexp.pdfreed;	/* so far... */
930 
931 	/*
932 	 * do loop #1!   alternate starting queue between swap and object based
933 	 * on the low bit of uvmexp.pdrevs (which we bump by one each call).
934 	 */
935 
936 	got_it = FALSE;
937 	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
938 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
939 	if (!got_it)
940 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
941 	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
942 		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
943 
944 	/*
945 	 * we have done the scan to get free pages.   now we work on meeting
946 	 * our inactive target.
947 	 */
948 
949 	page_shortage = uvmexp.inactarg - uvmexp.inactive;
950 	pages_freed = uvmexp.pdfreed - pages_freed; /* # pages freed in loop */
951 	if (page_shortage <= 0 && pages_freed == 0)
952 		page_shortage = 1;
953 
954 	UVMHIST_LOG(pdhist, "  second loop: page_shortage=%d", page_shortage,
955 	    0, 0, 0);
956 	for (p = uvm.page_active.tqh_first ;
957 	    p != NULL && page_shortage > 0 ; p = nextpg) {
958 		nextpg = p->pageq.tqe_next;
959 		if (p->flags & PG_BUSY)
960 			continue;	/* quick check before trying to lock */
961 
962 		/*
963 		 * lock owner
964 		 */
965 		/* is page anon owned or ownerless? */
966 		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
967 
968 #ifdef DIAGNOSTIC
969 			if (p->uanon == NULL)
970 				panic("pagedaemon: page with no anon or "
971 				    "object detected - loop 2");
972 #endif
973 
974 			if (!simple_lock_try(&p->uanon->an_lock))
975 				continue;
976 
977 			/* take over the page? */
978 			if ((p->pqflags & PQ_ANON) == 0) {
979 
980 #ifdef DIAGNOSTIC
981 				if (p->loan_count < 1)
982 					panic("pagedaemon: non-loaned "
983 					    "ownerless page detected - loop 2");
984 #endif
985 
986 				p->loan_count--;
987 				p->pqflags |= PQ_ANON;
988 			}
989 
990 		} else {
991 
992 			if (!simple_lock_try(&p->uobject->vmobjlock))
993 				continue;
994 
995 		}
996 
997 		if ((p->flags & PG_BUSY) == 0) {
998 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
999 			/* no need to check wire_count as pg is "active" */
1000 			uvm_pagedeactivate(p);
1001 			uvmexp.pddeact++;
1002 			page_shortage--;
1003 		}
1004 
1005 		if (p->pqflags & PQ_ANON)
1006 			simple_unlock(&p->uanon->an_lock);
1007 		else
1008 			simple_unlock(&p->uobject->vmobjlock);
1009 	}
1010 
1011 	/*
1012 	 * done scan
1013 	 */
1014 }
1015