xref: /netbsd-src/sys/uvm/uvm_pdaemon.c (revision 7c7c171d130af9949261bc7dce2150a03c3d239c)
1 /*	$NetBSD: uvm_pdaemon.c,v 1.8 1998/03/09 00:58:59 mrg Exp $	*/
2 
3 /*
4  * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
5  *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
6  */
7 /*
8  * Copyright (c) 1997 Charles D. Cranor and Washington University.
9  * Copyright (c) 1991, 1993, The Regents of the University of California.
10  *
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by Charles D. Cranor,
27  *      Washington University, the University of California, Berkeley and
28  *      its contributors.
29  * 4. Neither the name of the University nor the names of its contributors
30  *    may be used to endorse or promote products derived from this software
31  *    without specific prior written permission.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43  * SUCH DAMAGE.
44  *
45  *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
46  * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
47  *
48  *
49  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
50  * All rights reserved.
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72 
73 #include "opt_uvmhist.h"
74 
75 /*
76  * uvm_pdaemon.c: the page daemon
77  */
78 
79 #include <sys/param.h>
80 #include <sys/proc.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_kern.h>
87 
88 #include <uvm/uvm.h>
89 
90 /*
91  * local prototypes
92  */
93 
94 static void		uvmpd_scan __P((void));
95 static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
96 static void		uvmpd_tune __P((void));
97 
98 
99 /*
100  * uvm_wait: wait (sleep) for the page daemon to free some pages
101  *
102  * => should be called with all locks released
103  * => should _not_ be called by the page daemon (to avoid deadlock)
104  */
105 
106 void uvm_wait(wmsg)
107 	char *wmsg;
108 {
109 	int timo = 0;
110 	int s = splbio();
111 
112 	/*
113 	 * check for page daemon going to sleep (waiting for itself)
114 	 */
115 
116 	if (curproc == uvm.pagedaemon_proc) {
117 		/*
118 		 * now we have a problem: the pagedaemon wants to go to
119 		 * sleep until it frees more memory.   but how can it
120 		 * free more memory if it is asleep?  that is a deadlock.
121 		 * we have two options:
122 		 *  [1] panic now
123 		 *  [2] put a timeout on the sleep, thus causing the
124 		 *      pagedaemon to only pause (rather than sleep forever)
125 		 *
126 		 * note that option [2] will only help us if we get lucky
127 		 * and some other process on the system breaks the deadlock
128 		 * by exiting or freeing memory (thus allowing the pagedaemon
129 		 * to continue).  for now we panic if DEBUG is defined,
130 		 * otherwise we hope for the best with option [2] (better
131 		 * yet, this should never happen in the first place!).
132 		 */
133 
134 		printf("pagedaemon: deadlock detected!\n");
135 		timo = hz >> 3;		/* set timeout */
136 #if defined(DEBUG)
137 		/* DEBUG: panic so we can debug it */
138 		panic("pagedaemon deadlock");
139 #endif
140 	}
141 
142 	simple_lock(&uvm.pagedaemon_lock);
143 	thread_wakeup(&uvm.pagedaemon);		/* wake the daemon! */
144 	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
145 	    timo);
146 
147 	splx(s);
148 }
149 
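/*
 * illustrative only (assumed caller, not part of this file): code that
 * needs free pages but holds no locks would typically loop on its
 * allocator and block in uvm_wait until the daemon makes progress, e.g.
 *
 *	while ((pg = uvm_pagealloc(uobj, offset, NULL)) == NULL)
 *		uvm_wait("uvmwait");
 *
 * the allocator call above is only a sketch of the usual pattern; the
 * real callers live elsewhere in uvm.
 */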
150 
151 /*
152  * uvmpd_tune: tune paging parameters
153  *
154  * => called whenever memory is added to (or removed from?) the system
155  * => caller must call with page queues locked
156  */
157 
158 static void
159 uvmpd_tune()
160 {
161 	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
162 
163 	uvmexp.freemin = uvmexp.npages / 20;
164 
165 	/* between 16k and 256k */
166 	/* XXX:  what are these values good for? */
167 	uvmexp.freemin = max(uvmexp.freemin, (16*1024)/PAGE_SIZE);
168 	uvmexp.freemin = min(uvmexp.freemin, (256*1024)/PAGE_SIZE);
169 
170 	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
171 	if (uvmexp.freetarg <= uvmexp.freemin)
172 		uvmexp.freetarg = uvmexp.freemin + 1;
173 
174 	/* uvmexp.inactarg: computed in main daemon loop */
175 
176 	uvmexp.wiredmax = uvmexp.npages / 3;
177 	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
178 	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
179 }
180 
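/*
 * worked example (hypothetical machine, for illustration only): with 4k
 * pages and npages = 8192 (32MB of managed memory), freemin starts at
 * 8192/20 = 409 and is then clamped by the [16k..256k] bounds above to
 * 64 pages.  freetarg becomes (64*4)/3 = 85 pages and wiredmax is
 * 8192/3 = 2730 pages.
 */
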
181 /*
182  * uvm_pageout: the main loop for the pagedaemon
183  */
184 
185 void
186 uvm_pageout()
187 {
188 	int npages = 0;
189 	int s;
190 	struct uvm_aiodesc *aio, *nextaio;
191 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
192 
193 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
194 
195 	/*
196 	 * ensure correct priority and set paging parameters...
197 	 */
198 
199 	uvm.pagedaemon_proc = curproc;
200 	(void) spl0();
201 	uvm_lock_pageq();
202 	npages = uvmexp.npages;
203 	uvmpd_tune();
204 	uvm_unlock_pageq();
205 
206 	/*
207 	 * main loop
208 	 */
209 	while (TRUE) {
210 
211 		/*
212 		 * carefully attempt to go to sleep (without losing "wakeups"!).
213 		 * we need splbio because we want to make sure the aio_done list
214 		 * is totally empty before we go to sleep.
215 		 */
216 
217 		s = splbio();
218 		simple_lock(&uvm.pagedaemon_lock);
219 
220 		/*
221 		 * if we've got done aio's, then bypass the sleep
222 		 */
223 
224 		if (uvm.aio_done.tqh_first == NULL) {
225 			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
226 			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
227 			    &uvm.pagedaemon_lock, FALSE, "daemon_slp", 0);
228 			uvmexp.pdwoke++;
229 			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
230 
231 			/* relock pagedaemon_lock, still at splbio */
232 			simple_lock(&uvm.pagedaemon_lock);
233 		}
234 
235 		/*
236 		 * check for done aio structures
237 		 */
238 
239 		aio = uvm.aio_done.tqh_first;	/* save current list (if any) */
240 		if (aio) {
241 			TAILQ_INIT(&uvm.aio_done);	/* zero global list */
242 		}
243 
244 		simple_unlock(&uvm.pagedaemon_lock);	/* unlock */
245 		splx(s);				/* drop splbio */
246 
247 		/*
248 		 * first clear out any pending aios (to free space in case we
249 		 * want to pageout more stuff).
250 		 */
251 
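		/*
		 * each aio processed here corresponds to an async pageout
		 * started in uvmpd_scan_inactive() (the VM_PAGER_PEND case),
		 * which bumped uvmexp.paging by the cluster size; this loop
		 * undoes that accounting as the i/o's complete.
		 */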
252 		for (/*null*/; aio != NULL ; aio = nextaio) {
253 
254 			uvmexp.paging -= aio->npages;
255 			nextaio = aio->aioq.tqe_next;
256 			aio->aiodone(aio);
257 
258 		}
259 
260 		/*
261 		 * now lock page queues and recompute inactive count
262 		 */
263 		uvm_lock_pageq();
264 
265 		if (npages != uvmexp.npages) {	/* check for new pages? */
266 			npages = uvmexp.npages;
267 			uvmpd_tune();
268 		}
269 
270 		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
271 		if (uvmexp.inactarg <= uvmexp.freetarg)
272 			uvmexp.inactarg = uvmexp.freetarg + 1;
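
		/*
		 * e.g. (hypothetical counts): with 600 active and 300
		 * inactive pages, inactarg = (600 + 300) / 3 = 300, so the
		 * check below calls uvmpd_scan(), whose second loop
		 * deactivates active pages until the inactive count
		 * approaches this target.
		 */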
273 
274 		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
275 		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
276 		    uvmexp.inactarg);
277 
278 		/*
279 		 * scan if needed
280 		 * [XXX: note we are reading uvmexp.free without locking]
281 		 */
282 		if (uvmexp.free < uvmexp.freetarg ||
283 		    uvmexp.inactive < uvmexp.inactarg)
284 			uvmpd_scan();
285 
286 		/*
287 		 * done scan.  unlock page queues (the only lock we are holding)
288 		 */
289 		uvm_unlock_pageq();
290 
291 		/*
292 		 * done!    restart loop.
293 		 */
294 		thread_wakeup(&uvmexp.free);
295 	}
296 	/*NOTREACHED*/
297 }
298 
299 /*
300  * uvmpd_scan_inactive: the first loop of uvmpd_scan broken out into
301  * 	its own function for ease of reading.
302  *
303  * => called with page queues locked
304  * => we work on meeting our free target by converting inactive pages
305  *    into free pages.
306  * => we handle the building of swap-backed clusters
307  * => we return TRUE if we are exiting because we met our target
308  */
309 
310 static boolean_t
311 uvmpd_scan_inactive(pglst)
312 	struct pglist *pglst;
313 {
314 	boolean_t retval = FALSE;	/* assume we haven't hit target */
315 	int s, free, result;
316 	struct vm_page *p, *nextpg;
317 	struct uvm_object *uobj;
318 	struct vm_page *pps[MAXBSIZE/PAGE_SIZE], **ppsp;
319 	int npages;
320 	struct vm_page *swpps[MAXBSIZE/PAGE_SIZE]; 	/* XXX: see below */
321 	int swnpages, swcpages;				/* XXX: see below */
322 	int swslot, oldslot;
323 	struct vm_anon *anon;
324 	boolean_t swap_backed;
325 	vm_offset_t start;
326 	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
327 
328 	/*
329 	 * note: we currently keep swap-backed pages on a separate inactive
330 	 * list from object-backed pages.   however, merging the two lists
331 	 * back together again hasn't been ruled out.   thus, we keep our
332 	 * swap cluster in "swpps" rather than in pps (allows us to mix
333 	 * clustering types in the event of a mixed inactive queue).
334 	 */
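
	/*
	 * cluster sizing (illustrative): swpps[] holds MAXBSIZE/PAGE_SIZE
	 * entries, so with a 64k MAXBSIZE and 4k pages up to 16 dirty
	 * swap-backed pages can be gathered into a single uvm_pager_put
	 * call before swslot is cleared and a new cluster is started.
	 */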
335 
336 	/*
337 	 * swslot is non-zero if we are building a swap cluster.  we want
338 	 * to stay in the loop while we have a page to scan or we have
339 	 * a swap-cluster to build.
340 	 */
341 	swslot = 0;
342 	swnpages = swcpages = 0;
343 	free = 0;
344 
345 	for (p = pglst->tqh_first ; p != NULL || swslot != 0 ; p = nextpg) {
346 
347 		/*
348 		 * note that p can be NULL iff we have traversed the whole
349 		 * list and need to do one final swap-backed clustered pageout.
350 		 */
351 		if (p) {
352 			/*
353 			 * update our copy of "free" and see if we've met
354 			 * our target
355 			 */
356 			s = splimp();
357 			uvm_lock_fpageq();
358 			free = uvmexp.free;
359 			uvm_unlock_fpageq();
360 			splx(s);
361 
362 			if (free >= uvmexp.freetarg) {
363 				UVMHIST_LOG(pdhist,"  met free target: "
364 				    "exit loop", 0, 0, 0, 0);
365 				retval = TRUE;		/* hit the target! */
366 
367 				if (swslot == 0)
368 					/* exit now if no swap-i/o pending */
369 					break;
370 
371 				/* set p to null to signal final swap i/o */
372 				p = NULL;
373 			}
374 		}
375 
376 		uobj = NULL;	/* be safe and shut gcc up */
377 		anon = NULL;	/* be safe and shut gcc up */
378 
379 		if (p) {	/* if (we have a new page to consider) */
380 			/*
381 			 * we are below target and have a new page to consider.
382 			 */
383 			uvmexp.pdscans++;
384 			nextpg = p->pageq.tqe_next;
385 
386 			/*
387 			 * move referenced pages back to active queue and
388 			 * skip to next page (unlikely to happen since
389 			 * inactive pages shouldn't have any valid mappings
390 			 * and we cleared reference before deactivating).
391 			 */
392 			if (pmap_is_referenced(PMAP_PGARG(p))) {
393 				uvm_pageactivate(p);
394 				uvmexp.pdreact++;
395 				continue;
396 			}
397 
398 			/*
399 			 * first we attempt to lock the object that this page
400 			 * belongs to.  if our attempt fails we skip on to
401 			 * the next page (no harm done).  it is important to
402 			 * "try" locking the object as we are locking in the
403 			 * wrong order (pageq -> object) and we don't want to
404 			 * get deadlocked.
405 			 *
406 			 * the only time we expect to see an ownerless page
407 			 * (i.e. a page with no uobject and !PQ_ANON) is if an
408 			 * anon has loaned a page from a uvm_object and the
409 			 * uvm_object has dropped the ownership.  in that
410 			 * case, the anon can "take over" the loaned page
411 			 * and make it its own.
412 			 */
413 
414 			/* is page part of an anon or ownerless ? */
415 			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
416 
417 				anon = p->uanon;
418 
419 #ifdef DIAGNOSTIC
420 				/* to be on inactive q, page must be part
421 				 * of _something_ */
422 				if (anon == NULL)
423 					panic("pagedaemon: page with no anon "
424 					    "or object detected - loop 1");
425 #endif
426 
427 				if (!simple_lock_try(&anon->an_lock))
428 					/* lock failed, skip this page */
429 					continue;
430 
431 				/*
432 				 * if the page is ownerless, claim it in the
433 				 * name of "anon"!
434 				 */
435 				if ((p->pqflags & PQ_ANON) == 0) {
436 #ifdef DIAGNOSTIC
437 					if (p->loan_count < 1)
438 						panic("pagedaemon: non-loaned "
439 						    "ownerless page detected -"
440 						    " loop 1");
441 #endif
442 					p->loan_count--;
443 					p->pqflags |= PQ_ANON;      /* anon now owns it */
444 				}
445 
446 				if (p->flags & PG_BUSY) {
447 					simple_unlock(&anon->an_lock);
448 					uvmexp.pdbusy++;
449 					/* someone else owns page, skip it */
450 					continue;
451 				}
452 
453 				uvmexp.pdanscan++;
454 
455 			} else {
456 
457 				uobj = p->uobject;
458 
459 				if (!simple_lock_try(&uobj->vmobjlock))
460 					/* lock failed, skip this page */
461 					continue;
462 
463 				if (p->flags & PG_BUSY) {
464 					simple_unlock(&uobj->vmobjlock);
465 					uvmexp.pdbusy++;
466 					/* someone else owns page, skip it */
467 					continue;
468 				}
469 
470 				uvmexp.pdobscan++;
471 			}
472 
473 			/*
474 			 * we now have the object and the page queues locked.
475 			 * the page is not busy.   if the page is clean we
476 			 * can free it now and continue.
477 			 */
478 
479 			if (p->flags & PG_CLEAN) {
480 				/* zap all mappings with pmap_page_protect... */
481 				pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
482 				uvm_pagefree(p);
483 				uvmexp.pdfreed++;
484 
485 				if (anon) {
486 #ifdef DIAGNOSTIC
487 					/*
488 					 * an anonymous page can only be clean
489 					 * if it has valid backing store.
490 					 */
491 					if (anon->an_swslot == 0)
492 						panic("pagedaemon: clean anon "
493 						 "page without backing store?");
494 #endif
495 					/* remove from object */
496 					anon->u.an_page = NULL;
497 					simple_unlock(&anon->an_lock);
498 				} else {
499 					/* pagefree has already removed the
500 					 * page from the object */
501 					simple_unlock(&uobj->vmobjlock);
502 				}
503 				continue;
504 			}
505 
506 			/*
507 			 * this page is dirty, skip it if we'll have met our
508 			 * free target when all the current pageouts complete.
509 			 */
510 			if (free + uvmexp.paging > uvmexp.freetarg)
511 			{
512 				if (anon) {
513 					simple_unlock(&anon->an_lock);
514 				} else {
515 					simple_unlock(&uobj->vmobjlock);
516 				}
517 				continue;
518 			}
519 
520 			/*
521 			 * the page we are looking at is dirty.   we must
522 			 * clean it before it can be freed.  to do this we
523 			 * first mark the page busy so that no one else will
524 			 * touch the page.   we write protect all the mappings
525 			 * of the page so that no one touches it while it is
526 			 * in I/O.
527 			 */
528 
529 			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
530 			p->flags |= PG_BUSY;		/* now we own it */
531 			UVM_PAGE_OWN(p, "scan_inactive");
532 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ);
533 			uvmexp.pgswapout++;
534 
535 			/*
536 			 * for swap-backed pages we need to (re)allocate
537 			 * swap space.
538 			 */
539 			if (swap_backed) {
540 
541 				/*
542 				 * free old swap slot (if any)
543 				 */
544 				if (anon) {
545 					if (anon->an_swslot) {
546 						uvm_swap_free(anon->an_swslot,
547 						    1);
548 						anon->an_swslot = 0;
549 					}
550 				} else {
551 					oldslot = uao_set_swslot(uobj,
552 					    p->offset/PAGE_SIZE, 0);
553 
554 					if (oldslot)
555 						uvm_swap_free(oldslot, 1);
556 				}
557 
558 				/*
559 				 * start new cluster (if necessary)
560 				 */
561 				if (swslot == 0) {
562 					/* want this much */
563 					swnpages = MAXBSIZE/PAGE_SIZE;
564 
565 					swslot = uvm_swap_alloc(&swnpages,
566 					    TRUE);
567 
568 					if (swslot == 0) {
569 						/* no swap?  give up! */
570 						p->flags &= ~PG_BUSY;
571 						UVM_PAGE_OWN(p, NULL);
572 						if (anon)
573 							simple_unlock(
574 							    &anon->an_lock);
575 						else
576 							simple_unlock(
577 							    &uobj->vmobjlock);
578 						continue;
579 					}
580 					swcpages = 0;	/* cluster is empty */
581 				}
582 
583 				/*
584 				 * add block to cluster
585 				 */
586 				swpps[swcpages] = p;
587 				uvmexp.pgswapout++;
588 				if (anon)
589 					anon->an_swslot = swslot + swcpages;
590 				else
591 					uao_set_swslot(uobj,
592 					    p->offset/PAGE_SIZE,
593 					    swslot + swcpages);
594 				swcpages++;
595 
596 				/* done (swap-backed) */
597 			}
598 
599 			/* end: if (p) ["if we have new page to consider"] */
600 		} else {
601 
602 			/* if p == NULL we must be doing a last swap i/o */
603 			swap_backed = TRUE;
604 		}
605 
606 		/*
607 		 * now consider doing the pageout.
608 		 *
609 		 * for swap-backed pages, we do the pageout if we have either
610 		 * filled the cluster (in which case swnpages == swcpages) or
611 		 * run out of pages (p == NULL).
612 		 *
613 		 * for object pages, we always do the pageout.
614 		 */
615 		if (swap_backed) {
616 
617 			if (p) {	/* if we just added a page to cluster */
618 				if (anon)
619 					simple_unlock(&anon->an_lock);
620 				else
621 					simple_unlock(&uobj->vmobjlock);
622 
623 				/* cluster not full yet? */
624 				if (swcpages < swnpages)
625 					continue;
626 			}
627 
628 			/* starting I/O now... set up for it */
629 			npages = swcpages;
630 			ppsp = swpps;
631 			/* for swap-backed pages only */
632 			start = (vm_offset_t) swslot;
633 
634 			/* if this is final pageout we could have a few
635 			 * extra swap blocks */
636 			if (swcpages < swnpages) {
637 				uvm_swap_free(swslot + swcpages,
638 				    (swnpages - swcpages));
639 			}
640 
641 		} else {
642 
643 			/* normal object pageout */
644 			ppsp = pps;
645 			npages = sizeof(pps) / sizeof(struct vm_page *);
646 			/* not looked at because PGO_ALLPAGES is set */
647 			start = 0;
648 
649 		}
650 
651 		/*
652 		 * now do the pageout.
653 		 *
654 		 * for swap_backed pages we have already built the cluster.
655 		 * for !swap_backed pages, uvm_pager_put will call the object's
656 		 * "make put cluster" function to build a cluster on our behalf.
657 		 *
658 		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
659 		 * it to free the cluster pages for us on a successful I/O (it
660 		 * always does this for unsuccessful I/O requests).  this
661 		 * allows us to do clustered pageout without having to deal
662 		 * with cluster pages at this level.
663 		 *
664 		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
665 		 *  IN: locked: uobj (if !swap_backed), page queues
666 		 * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
667 		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
668 		 *
669 		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
670 		 */
671 
672 		/* locked: uobj (if !swap_backed), page queues */
673 		uvmexp.pdpageouts++;
674 		result = uvm_pager_put((swap_backed) ? NULL : uobj, p,
675 		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
676 		/* locked: uobj (if !swap_backed && result != PEND) */
677 		/* unlocked: pageqs, object (if swap_backed ||result == PEND) */
678 
679 		/*
680 		 * if we did i/o to swap, zero swslot to indicate that we are
681 		 * no longer building a swap-backed cluster.
682 		 */
683 
684 		if (swap_backed)
685 			swslot = 0;		/* done with this cluster */
686 
687 		/*
688 		 * first, we check for VM_PAGER_PEND which means that the
689 		 * async I/O is in progress and the async I/O done routine
690 		 * will clean up after us.   in this case we move on to the
691 		 * next page.
692 		 *
693 		 * there is a very remote chance that the pending async i/o can
694 		 * finish _before_ we get here.   if that happens, our page "p"
695 		 * may no longer be on the inactive queue.   so we verify this
696 		 * when determining the next page (starting over at the head if
697 		 * we've lost our inactive page).
698 		 */
699 
700 		if (result == VM_PAGER_PEND) {
701 			uvmexp.paging += npages;
702 			uvm_lock_pageq();		/* relock page queues */
703 			uvmexp.pdpending++;
704 			if (p) {
705 				if (p->pqflags & PQ_INACTIVE)
706 					/* reload! */
707 					nextpg = p->pageq.tqe_next;
708 				else
709 					/* reload! */
710 					nextpg = pglst->tqh_first;
711 			} else {
712 				nextpg = NULL;		/* done list */
713 			}
714 			continue;
715 		}
716 
717 		/*
718 		 * clean up "p" if we have one
719 		 */
720 
721 		if (p) {
722 			/*
723 			 * the I/O request to "p" is done and uvm_pager_put
724 			 * has freed any cluster pages it may have allocated
725 			 * during I/O.  all that is left for us to do is
726 			 * clean up page "p" (which is still PG_BUSY).
727 			 *
728 			 * our result could be one of the following:
729 			 *   VM_PAGER_OK: successful pageout
730 			 *
731 			 *   VM_PAGER_AGAIN: temporary resource shortage, we skip
732 			 *     to next page
733 			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
734 			 *     "reactivate" page to get it out of the way (it
735 			 *     will eventually drift back into the inactive
736 			 *     queue for a retry).
737 			 *   VM_PAGER_UNLOCK: should never see this as it is
738 			 *     only valid for "get" operations
739 			 */
740 
741 			/* relock p's object: page queues not locked yet, so
742 			 * no need for "try" */
743 
744 			/* !swap_backed case: already locked... */
745 			if (swap_backed) {
746 				if (anon)
747 					simple_lock(&anon->an_lock);
748 				else
749 					simple_lock(&uobj->vmobjlock);
750 			}
751 
752 #ifdef DIAGNOSTIC
753 			if (result == VM_PAGER_UNLOCK)
754 				panic("pagedaemon: pageout returned "
755 				    "invalid 'unlock' code");
756 #endif
757 
758 			/* handle PG_WANTED now */
759 			if (p->flags & PG_WANTED)
760 				/* still holding object lock */
761 				thread_wakeup(p);
762 
763 			p->flags &= ~(PG_BUSY|PG_WANTED);
764 			UVM_PAGE_OWN(p, NULL);
765 
766 			/* released during I/O? */
767 			if (p->flags & PG_RELEASED) {
768 				if (anon) {
769 					/* remove page so we can get nextpg */
770 					anon->u.an_page = NULL;
771 
772 					/* XXX needed? */
773 					simple_unlock(&anon->an_lock);
774 					uvm_anfree(anon);	/* kills anon */
775 					pmap_page_protect(PMAP_PGARG(p),
776 					    VM_PROT_NONE);
777 					anon = NULL;
778 					uvm_lock_pageq();
779 					nextpg = p->pageq.tqe_next;
780 					/* free released page */
781 					uvm_pagefree(p);
782 
783 				} else {
784 
785 #ifdef DIAGNOSTIC
786 					if (uobj->pgops->pgo_releasepg == NULL)
787 						panic("pagedaemon: no "
788 						   "pgo_releasepg function");
789 #endif
790 
791 					/*
792 					 * pgo_releasepg nukes the page and
793 					 * gets "nextpg" for us.  it returns
794 					 * with the page queues locked (when
795 					 * given nextpg ptr).
796 					 */
797 					if (!uobj->pgops->pgo_releasepg(p,
798 					    &nextpg))
799 						/* uobj died after release */
800 						uobj = NULL;
801 
802 					/*
803 					 * lock page queues here so that they're
804 					 * always locked at the end of the loop.
805 					 */
806 					uvm_lock_pageq();
807 				}
808 
809 			} else {	/* page was not released during I/O */
810 
811 				uvm_lock_pageq();
812 				nextpg = p->pageq.tqe_next;
813 
814 				if (result != VM_PAGER_OK) {
815 
816 					/* pageout was a failure... */
817 					if (result != VM_PAGER_AGAIN)
818 						uvm_pageactivate(p);
819 					pmap_clear_reference(PMAP_PGARG(p));
820 					/* XXXCDC: if (swap_backed) FREE p's
821 					 * swap block? */
822 
823 				} else {
824 
825 					/* pageout was a success... */
826 					pmap_clear_reference(PMAP_PGARG(p));
827 					pmap_clear_modify(PMAP_PGARG(p));
828 					p->flags |= PG_CLEAN;
829 					/* XXX: could free page here, but old
830 					 * pagedaemon does not */
831 
832 				}
833 			}
834 
835 			/*
836 			 * drop object lock (if there is an object left).   do
837 			 * a safety check of nextpg to make sure it is on the
838 			 * inactive queue (it should be since PG_BUSY pages on
839 			 * the inactive queue can't be re-queued [note: not
840 			 * true for active queue]).
841 			 */
842 
843 			if (anon)
844 				simple_unlock(&anon->an_lock);
845 			else if (uobj)
846 				simple_unlock(&uobj->vmobjlock);
847 
848 		} /* if (p) */ else {
849 
850 			/* if p is null in this loop, make sure it stays null
851 			 * in next loop */
852 			nextpg = NULL;
853 
854 			/*
855 			 * lock page queues here just so they're always locked
856 			 * at the end of the loop.
857 			 */
858 			uvm_lock_pageq();
859 		}
860 
861 		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
862 			printf("pagedaemon: invalid nextpg!   reverting to "
863 			    "queue head\n");
864 			nextpg = pglst->tqh_first;	/* reload! */
865 		}
866 
867 	}	/* end of "inactive" 'for' loop */
868 	return (retval);
869 }
870 
871 /*
872  * uvmpd_scan: scan the page queues and attempt to meet our targets.
873  *
874  * => called with pageq's locked
875  */
876 
877 void
878 uvmpd_scan()
879 {
880 	int s, free, pages_freed, page_shortage;
881 	struct vm_page *p, *nextpg;
882 	struct uvm_object *uobj;
883 	boolean_t got_it;
884 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
885 
886 	uvmexp.pdrevs++;		/* counter */
887 
888 #ifdef __GNUC__
889 	uobj = NULL;	/* XXX gcc */
890 #endif
891 	/*
892 	 * get current "free" page count
893 	 */
894 	s = splimp();
895 	uvm_lock_fpageq();
896 	free = uvmexp.free;
897 	uvm_unlock_fpageq();
898 	splx(s);
899 
900 #ifndef __SWAP_BROKEN
901 	/*
902 	 * swap out some processes if we are below our free target.
903 	 * we need to unlock the page queues for this.
904 	 */
905 	if (free < uvmexp.freetarg) {
906 
907 		uvmexp.pdswout++;
908 		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
909 		    uvmexp.freetarg, 0, 0);
910 		uvm_unlock_pageq();
911 		uvm_swapout_threads();
912 		pmap_update();		/* update so we can scan inactive q */
913 		uvm_lock_pageq();
914 
915 	}
916 #endif
917 
918 	/*
919 	 * now we want to work on meeting our targets.   first we work on our
920 	 * free target by converting inactive pages into free pages.  then
921 	 * we work on meeting our inactive target by converting active pages
922 	 * to inactive ones.
923 	 */
924 
925 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
926 	pages_freed = uvmexp.pdfreed;	/* so far... */
927 
928 	/*
929 	 * do loop #1!   alternate starting queue between swap and object based
930 	 * on the low bit of uvmexp.pdrevs (which we bump by one each call).
931 	 */
932 
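	/*
	 * concretely: on odd-numbered revolutions (with swap configured) the
	 * swap-backed inactive queue is tried first and the object queue
	 * second; on even revolutions the object queue goes first and the
	 * swap-backed queue is only scanned if the free target is still
	 * unmet.
	 */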
933 	got_it = FALSE;
934 	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
935 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
936 	if (!got_it)
937 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
938 	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
939 		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
940 
941 	/*
942 	 * we have done the scan to get free pages.   now we work on meeting
943 	 * our inactive target.
944 	 */
945 
946 	page_shortage = uvmexp.inactarg - uvmexp.inactive;
947 	pages_freed = uvmexp.pdfreed - pages_freed; /* # pages freed in loop */
948 	if (page_shortage <= 0 && pages_freed == 0)
949 		page_shortage = 1;
950 
951 	UVMHIST_LOG(pdhist, "  second loop: page_shortage=%d", page_shortage,
952 	    0, 0, 0);
953 	for (p = uvm.page_active.tqh_first ;
954 	    p != NULL && page_shortage > 0 ; p = nextpg) {
955 		nextpg = p->pageq.tqe_next;
956 		if (p->flags & PG_BUSY)
957 			continue;	/* quick check before trying to lock */
958 
959 		/*
960 		 * lock owner
961 		 */
962 		/* is page anon owned or ownerless? */
963 		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
964 
965 #ifdef DIAGNOSTIC
966 			if (p->uanon == NULL)
967 				panic("pagedaemon: page with no anon or "
968 				    "object detected - loop 2");
969 #endif
970 
971 			if (!simple_lock_try(&p->uanon->an_lock))
972 				continue;
973 
974 			/* take over the page? */
975 			if ((p->pqflags & PQ_ANON) == 0) {
976 
977 #ifdef DIAGNOSTIC
978 				if (p->loan_count < 1)
979 					panic("pagedaemon: non-loaned "
980 					    "ownerless page detected - loop 2");
981 #endif
982 
983 				p->loan_count--;
984 				p->pqflags |= PQ_ANON;
985 			}
986 
987 		} else {
988 
989 			if (!simple_lock_try(&p->uobject->vmobjlock))
990 				continue;
991 
992 		}
993 
994 		if ((p->flags & PG_BUSY) == 0) {
995 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
996 			/* no need to check wire_count as pg is "active" */
997 			uvm_pagedeactivate(p);
998 			uvmexp.pddeact++;
999 			page_shortage--;
1000 		}
1001 
1002 		if (p->pqflags & PQ_ANON)
1003 			simple_unlock(&p->uanon->an_lock);
1004 		else
1005 			simple_unlock(&p->uobject->vmobjlock);
1006 	}
1007 
1008 	/*
1009 	 * done scan
1010 	 */
1011 }
1012