xref: /netbsd-src/sys/uvm/uvm_pdaemon.c (revision 27578b9aac214cc7796ead81dcc5427e79d5f2a0)
/*	$NetBSD: uvm_pdaemon.c,v 1.37 2001/09/15 20:36:47 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass through the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages through the
 * inactive queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

void		uvmpd_scan __P((void));
boolean_t	uvmpd_scan_inactive __P((struct pglist *));
void		uvmpd_tune __P((void));

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(wmsg)
	const char *wmsg;
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
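
/*
 * example usage (a hypothetical caller, not from this file): retry an
 * allocation until the pagedaemon frees something up, e.g.
 *
 *	while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
 *		uvm_wait("pgalloc");
 */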


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => must be called with the page queues locked
 */

void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
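
/*
 * worked example of the arithmetic above (assuming 4KB pages, i.e.
 * PAGE_SHIFT == 12): on a 64MB machine npages is about 16384, so
 * npages / 20 is about 819; the 256KB cap then clamps freemin to 64
 * pages, and freetarg becomes (64 * 4) / 3 = 85 pages.
 */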

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/* drain pool resources */
		pool_drain(0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}
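
		/*
		 * e.g. (illustrative numbers only): with 3000 active and
		 * 600 inactive pages, this sets inactarg to
		 * (3000 + 600) / 3 = 1200 pages.
		 */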

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();
	}
	/*NOTREACHED*/
}
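
/*
 * note: the daemon is normally woken by the wakeup(&uvm.pagedaemon) in
 * uvm_wait() above, and by the aiodone daemon below when free memory
 * dips to the kernel reserve.
 */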


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}
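
		/*
		 * sketch of the matching producer side (an assumption for
		 * illustration; the real enqueue lives in the async i/o
		 * completion path):
		 *
		 *	s = splbio();
		 *	simple_lock(&uvm.aiodoned_lock);
		 *	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
		 *	wakeup(&uvm.aiodoned);
		 *	simple_unlock(&uvm.aiodoned_lock);
		 *	splx(s);
		 */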

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

boolean_t
uvmpd_scan_inactive(pglst)
	struct pglist *pglst;
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int error;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT];
	struct simplelock *slock;
	int swnpages, swcpages;
	int swslot;
	int dirtyreacts, t, result;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	dirtyreacts = 0;
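
	/*
	 * illustrative cluster geometry (assuming MAXBSIZE is 64KB and 4KB
	 * pages): swpps[] holds at most 16 pages, swslot is the base swap
	 * slot of the cluster, and the page in swpps[i] is destined for
	 * swap slot swslot + i.
	 */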
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		uobj = NULL;
		anon = NULL;
		if (p) {

			/*
			 * see if we've met the free target.
			 */

			if (uvmexp.free + uvmexp.paging >=
			    uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				UVMHIST_LOG(pdhist,"  met free target: "
					    "exit loop", 0, 0, 0, 0);
				retval = TRUE;

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
				nextpg = NULL;
			}
		}
		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page.
			 */

			if (pmap_clear_reference(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
			anon = p->uanon;
			uobj = p->uobject;

			/*
			 * enforce the minimum thresholds on different
			 * types of memory usage.  if reusing the current
			 * page would reduce that type of usage below its
			 * minimum, reactivate the page instead and move
			 * on to the next page.
			 */

			t = uvmexp.active + uvmexp.inactive + uvmexp.free;
			if (anon &&
			    uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdreanon++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VTEXT(uobj) &&
			    uvmexp.vtextpages <= (t * uvmexp.vtextmin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdrevtext++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
			    !UVM_OBJ_IS_VTEXT(uobj) &&
			    uvmexp.vnodepages <= (t * uvmexp.vnodemin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdrevnode++;
				continue;
			}
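
			/*
			 * note: the *min values above are fractions scaled
			 * by 256 (assuming the usual sysctl conversion of
			 * percent * 256 / 100), so (t * min) >> 8 is roughly
			 * that percentage of all managed pages.
			 */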

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless ? */
			if ((p->pqflags & PQ_ANON) || uobj == NULL) {
				KASSERT(anon != NULL);
				slock = &anon->an_lock;
				if (!simple_lock_try(slock)) {
					/* lock failed, skip this page */
					continue;
				}

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				KASSERT(uobj != NULL);
				slock = &uobj->vmobjlock;
				if (!simple_lock_try(slock)) {
					continue;
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * if the page is not swap-backed, call the object's
			 * pager to flush and free the page.
			 */

			if ((p->pqflags & PQ_SWAPBACKED) == 0) {
				uvm_unlock_pageq();
				error = (uobj->pgops->pgo_put)(uobj, p->offset,
				    p->offset + PAGE_SIZE,
				    PGO_CLEANIT|PGO_FREE);
				uvm_lock_pageq();
				if (nextpg &&
				    (nextpg->pqflags & PQ_INACTIVE) == 0) {
					nextpg = TAILQ_FIRST(pglst);
				}
				continue;
			}
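
			/*
			 * note: pgo_put expects the object locked and
			 * returns with it unlocked, which is why this path
			 * does no simple_unlock(slock) of its own.
			 */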

			/*
			 * the page is swap-backed.  remove all the permissions
			 * from the page so we can sync the modified info
			 * without any race conditions.  if the page is clean
			 * we can free it now and continue.
			 */

			pmap_page_protect(p, VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
				p->flags &= ~(PG_CLEAN);
			}
			if (p->flags & PG_CLEAN) {
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				/*
				 * for anons, we need to remove the page
				 * from the anon ourselves.  for aobjs,
				 * pagefree did that for us.
				 */

				if (anon) {
					KASSERT(anon->an_swslot != 0);
					anon->u.an_page = NULL;
				}
				simple_unlock(slock);
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (uvmexp.free + uvmexp.paging >
			    uvmexp.freetarg << 2) {
				simple_unlock(slock);
				continue;
			}

			/*
			 * free any swap space allocated to the page since
			 * we'll have to write it again with its new data.
			 */

			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
			} else if (p->pqflags & PQ_AOBJ) {
				uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
			}

			/*
			 * if every allocated swap page is occupied by a page
			 * that lives only in swap, then swap is effectively
			 * full and we can't page out any more swap-backed
			 * pages.  reactivate this page so that we eventually
			 * cycle all pages through the inactive queue.
			 */

			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (uvmexp.swpgonly == uvmexp.swpages) {
				dirtyreacts++;
				uvm_pageactivate(p);
				simple_unlock(slock);
				continue;
			}

			/*
			 * start new swap pageout cluster (if necessary).
			 */

			if (swslot == 0) {
				swnpages = MAXBSIZE >> PAGE_SHIFT;
				swslot = uvm_swap_alloc(&swnpages, TRUE);
				if (swslot == 0) {
					simple_unlock(slock);
					continue;
				}
				swcpages = 0;
			}
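
			/*
			 * note: the TRUE argument to uvm_swap_alloc() is
			 * "lessok": the allocator may hand back fewer slots
			 * than requested, updating swnpages to the number
			 * actually allocated.
			 */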

			/*
			 * at this point, we're definitely going to reuse this
			 * page.  mark the page busy and delayed-free.
			 * we should remove the page from the page queues
			 * so we don't ever look at it again.
			 * adjust counters and such.
			 */

			p->flags |= PG_BUSY;
			UVM_PAGE_OWN(p, "scan_inactive");

			p->flags |= PG_PAGEOUT;
			uvmexp.paging++;
			uvm_pagedequeue(p);

			uvmexp.pgswapout++;

			/*
			 * add the new page to the cluster.
			 */

			if (anon) {
				anon->an_swslot = swslot + swcpages;
				simple_unlock(slock);
			} else {
				result = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, swslot + swcpages);
				if (result == -1) {
					p->flags &= ~(PG_BUSY|PG_PAGEOUT);
					UVM_PAGE_OWN(p, NULL);
					uvmexp.paging--;
					uvm_pageactivate(p);
					simple_unlock(slock);
					continue;
				}
				simple_unlock(slock);
			}
			swpps[swcpages] = p;
			swcpages++;

			/*
			 * if the cluster isn't full, look for more pages
			 * before starting the i/o.
			 */

			if (swcpages < swnpages) {
				continue;
			}
		}

		/*
		 * if this is the final pageout we could have a few
		 * unused swap blocks.  if so, free them now.
		 */

		if (swcpages < swnpages) {
			uvm_swap_free(swslot + swcpages, (swnpages - swcpages));
		}

		/*
		 * now start the pageout.
		 */

		uvm_unlock_pageq();
		uvmexp.pdpageouts++;
		error = uvm_swap_put(swslot, swpps, swcpages, 0);
		KASSERT(error == 0);
		uvm_lock_pageq();

		/*
		 * zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		swslot = 0;

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
	}
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (uvmexp.free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	/*
	 * scan the inactive queue, freeing and paging out pages until
	 * we meet the free target (or run out of candidates).
	 */

	pages_freed = uvmexp.pdfreed;
	(void) uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpgonly < uvmexp.swpages &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}
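
	/*
	 * e.g. (illustrative numbers): with freetarg 64 and free 10, if
	 * every swap slot is allocated but some of those pages are still
	 * resident, swap_shortage becomes 54: try to reclaim up to 54
	 * swap slots from active pages below.
	 */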

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
		    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */
		/* is page anon owned or ownerless? */
		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
			anon = p->uanon;
			KASSERT(anon != NULL);
			if (!simple_lock_try(&anon->an_lock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		} else {
			uobj = p->uobject;
			if (!simple_lock_try(&uobj->vmobjlock)) {
				continue;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			if (p->pqflags & PQ_ANON)
				simple_unlock(&anon->an_lock);
			else
				simple_unlock(&uobj->vmobjlock);
			continue;
		}

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		if (p->pqflags & PQ_ANON)
			simple_unlock(&anon->an_lock);
		else
			simple_unlock(&uobj->vmobjlock);
	}
}
864