/*	$NetBSD: uvm_pdaemon.c,v 1.66 2005/07/30 06:33:36 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.66 2005/07/30 06:33:36 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced (and thus avoid being freed).
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_inactive(struct pglist *);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
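
/*
 * Illustrative sketch (not code from this file) of the intended calling
 * pattern: an allocation path drops its locks, lets the pagedaemon make
 * progress, then retries.  "uobj", "off" and the lock juggling here are
 * hypothetical; only uvm_pagealloc() and uvm_wait() are real.
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		simple_unlock(&uobj->vmobjlock);    release all locks first
 *		uvm_wait("pgwait");                 sleep for the pagedaemon
 *		simple_lock(&uobj->vmobjlock);      relock and retry
 *	}
 */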


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
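
/*
 * Worked example (illustrative only, assuming 4 KiB pages, PAGE_SHIFT == 12,
 * and a reserve_kernel small enough not to matter): with npages == 65536
 * (256 MB of RAM), npages / 20 == 3276, which the 256 KB ceiling clamps to
 * (256*1024) >> 12 == 64 pages.  freetarg then becomes (64 * 4) / 3 == 85
 * pages, and wiredmax becomes 65536 / 3 == 21845 pages.
 */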

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		/*
		 * Estimate a hint for buf_drain().  Note that buffer memory
		 * is returned to the system only when an entire pool page
		 * is empty.
		 */
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;
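
		/*
		 * Worked example (illustrative only): continuing the numbers
		 * from the uvmpd_tune() example above, with freetarg == 85,
		 * 600 active and 300 inactive pages, inactarg becomes
		 * (600 + 300) / 3 == 300; and if only 60 pages are free,
		 * bufcnt == 85 - 60 == 25 pages are requested back from the
		 * buffer cache via buf_drain() below.
		 */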

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(TRUE);

	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}
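
/*
 * The sleep above follows the classic "check condition, then atomically
 * unlock and sleep" pattern for avoiding lost wakeups.  A generic sketch
 * of the idea (illustrative only; "work_lock", "work_queue" and
 * "work_available" are hypothetical names, not part of this file):
 *
 *	simple_lock(&work_lock);
 *	while (TAILQ_FIRST(&work_queue) == NULL) {
 *		UVM_UNLOCK_AND_WAIT(&work_available, &work_lock, FALSE,
 *		    "work", 0);
 *		simple_lock(&work_lock);
 *	}
 *
 * UVM_UNLOCK_AND_WAIT releases the lock and sleeps in one atomic step,
 * so a wakeup between the check and the sleep cannot be missed.
 */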

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we exit the scan loop early once we've met our free target
 */

static void
uvmpd_scan_inactive(struct pglist *pglst)
{
	struct vm_page *p, *nextpg = NULL; /* Quell compiler warning */
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct vm_page *swpps[round_page(MAXPHYS) >> PAGE_SHIFT];
	int error;
	int result;
#endif /* defined(VMSWAP) */
	struct simplelock *slock;
	int swnpages, swcpages;
	int swslot;
	int dirtyreacts, t;
	boolean_t anonunder, fileunder, execunder;
	boolean_t anonover, fileover, execover;
	boolean_t anonreact, filereact, execreact;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	dirtyreacts = 0;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	t = uvmexp.active + uvmexp.inactive + uvmexp.free;
	anonunder = (uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8);
	fileunder = (uvmexp.filepages <= (t * uvmexp.filemin) >> 8);
	execunder = (uvmexp.execpages <= (t * uvmexp.execmin) >> 8);
	anonover = uvmexp.anonpages > ((t * uvmexp.anonmax) >> 8);
	fileover = uvmexp.filepages > ((t * uvmexp.filemax) >> 8);
	execover = uvmexp.execpages > ((t * uvmexp.execmax) >> 8);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = FALSE;
	}
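
	/*
	 * Worked example (illustrative only; the *min/*max values are
	 * stored as fractions of 256, so "(t * x) >> 8" is "t * x/256"):
	 * with t == 10000 pages and anonmin corresponding to roughly 10%
	 * (uvmexp.anonmin == 26), the threshold is (10000 * 26) >> 8 ==
	 * 1015 pages; if uvmexp.anonpages <= 1015, anonunder is TRUE and
	 * anonymous pages are reactivated rather than freed.
	 */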
#if !defined(VMSWAP)
	/*
	 * XXX no point in putting swap-backed pages on the page queue.
	 */

	anonreact = TRUE;
#endif /* !defined(VMSWAP) */
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		uobj = NULL;
		anon = NULL;
		if (p) {

			/*
			 * see if we've met the free target.
			 */

			if (uvmexp.free + uvmexp.paging >=
			    uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				UVMHIST_LOG(pdhist,"  met free target: "
					    "exit loop", 0, 0, 0, 0);

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
				nextpg = NULL;
			}
		}
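
		/*
		 * Note the exit threshold above is four times freetarg
		 * (freetarg << 2), counting pageouts still in flight
		 * (uvmexp.paging).  Continuing the earlier worked example
		 * with freetarg == 85, the scan stops once free + paging
		 * reaches 340 pages, or after UVMPD_NUMDIRTYREACTS (16)
		 * dirty-page reactivations while swap is full.
		 */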
		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page.
			 */

			if (pmap_clear_reference(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
			anon = p->uanon;
			uobj = p->uobject;

			/*
			 * enforce the minimum thresholds on different
			 * types of memory usage.  if reusing the current
			 * page would reduce that type of usage below its
			 * minimum, reactivate the page instead and move
			 * on to the next page.
			 */

			if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
				uvm_pageactivate(p);
				uvmexp.pdreexec++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
			    !UVM_OBJ_IS_VTEXT(uobj) && filereact) {
				uvm_pageactivate(p);
				uvmexp.pdrefile++;
				continue;
			}
			if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && anonreact) {
				uvm_pageactivate(p);
				uvmexp.pdreanon++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* does the page belong to an object? */
			if (uobj != NULL) {
				slock = &uobj->vmobjlock;
				if (!simple_lock_try(slock)) {
					continue;
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdobscan++;
			} else {
#if defined(VMSWAP)
				KASSERT(anon != NULL);
				slock = &anon->an_lock;
				if (!simple_lock_try(slock)) {
					continue;
				}

				/*
				 * set PQ_ANON if it isn't set already.
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
				panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
			}


			/*
			 * we now have the object and the page queues locked.
			 * if the page is not swap-backed, call the object's
			 * pager to flush and free the page.
			 */

			if ((p->pqflags & PQ_SWAPBACKED) == 0) {
				uvm_unlock_pageq();
				(void) (uobj->pgops->pgo_put)(uobj, p->offset,
				    p->offset + PAGE_SIZE,
				    PGO_CLEANIT|PGO_FREE);
				uvm_lock_pageq();
				if (nextpg &&
				    (nextpg->pqflags & PQ_INACTIVE) == 0) {
					nextpg = TAILQ_FIRST(pglst);
				}
				continue;
			}

#if defined(VMSWAP)
			/*
			 * the page is swap-backed.  remove all the permissions
			 * from the page so we can sync the modified info
			 * without any race conditions.  if the page is clean
			 * we can free it now and continue.
			 */

			pmap_page_protect(p, VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
				p->flags &= ~(PG_CLEAN);
			}
			if (p->flags & PG_CLEAN) {
				int slot;
				int pageidx;

				pageidx = p->offset >> PAGE_SHIFT;
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				/*
				 * for anons, we need to remove the page
				 * from the anon ourselves.  for aobjs,
				 * pagefree did that for us.
				 */

				if (anon) {
					KASSERT(anon->an_swslot != 0);
					anon->an_page = NULL;
					slot = anon->an_swslot;
				} else {
					slot = uao_find_swslot(uobj, pageidx);
				}
				simple_unlock(slock);

				if (slot > 0) {
					/* this page is now only in swap. */
					simple_lock(&uvm.swap_data_lock);
					KASSERT(uvmexp.swpgonly <
						uvmexp.swpginuse);
					uvmexp.swpgonly++;
					simple_unlock(&uvm.swap_data_lock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (uvmexp.free + uvmexp.paging >
			    uvmexp.freetarg << 2) {
				simple_unlock(slock);
				continue;
			}

			/*
			 * free any swap space allocated to the page since
			 * we'll have to write it again with its new data.
			 */

			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
			} else if (p->pqflags & PQ_AOBJ) {
				uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
			}

			/*
			 * if all pages in swap are only in swap,
			 * the swap space is full and we can't page out
			 * any more swap-backed pages.  reactivate this page
			 * so that we eventually cycle all pages through
			 * the inactive queue.
			 */

			if (uvm_swapisfull()) {
				dirtyreacts++;
				uvm_pageactivate(p);
				simple_unlock(slock);
				continue;
			}

			/*
			 * start new swap pageout cluster (if necessary).
			 */

			if (swslot == 0) {
				/* Even with strange MAXPHYS, the shift
				   implicitly rounds down to a page. */
				swnpages = MAXPHYS >> PAGE_SHIFT;
				swslot = uvm_swap_alloc(&swnpages, TRUE);
				if (swslot == 0) {
					simple_unlock(slock);
					continue;
				}
				swcpages = 0;
			}

			/*
			 * at this point, we're definitely going to reuse this
			 * page.  mark the page busy and delayed-free.
			 * we should remove the page from the page queues
			 * so we don't ever look at it again.
			 * adjust counters and such.
			 */

			p->flags |= PG_BUSY;
			UVM_PAGE_OWN(p, "scan_inactive");

			p->flags |= PG_PAGEOUT;
			uvmexp.paging++;
			uvm_pagedequeue(p);

			uvmexp.pgswapout++;

			/*
			 * add the new page to the cluster.
			 */

			if (anon) {
				anon->an_swslot = swslot + swcpages;
				simple_unlock(slock);
			} else {
				result = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, swslot + swcpages);
				if (result == -1) {
					p->flags &= ~(PG_BUSY|PG_PAGEOUT);
					UVM_PAGE_OWN(p, NULL);
					uvmexp.paging--;
					uvm_pageactivate(p);
					simple_unlock(slock);
					continue;
				}
				simple_unlock(slock);
			}
			swpps[swcpages] = p;
			swcpages++;

			/*
			 * if the cluster isn't full, look for more pages
			 * before starting the i/o.
			 */

			if (swcpages < swnpages) {
				continue;
			}
#else /* defined(VMSWAP) */
			panic("%s: swap-backed", __func__);
#endif /* defined(VMSWAP) */

		}

#if defined(VMSWAP)
		/*
		 * if this is the final pageout we could have a few
		 * unused swap blocks.  if so, free them now.
		 */

		if (swcpages < swnpages) {
			uvm_swap_free(swslot + swcpages, (swnpages - swcpages));
		}

		/*
		 * now start the pageout.
		 */

		uvm_unlock_pageq();
		uvmexp.pdpageouts++;
		error = uvm_swap_put(swslot, swpps, swcpages, 0);
		KASSERT(error == 0);
		uvm_lock_pageq();

		/*
		 * zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		swslot = 0;

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}
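
	/*
	 * Illustrative example: with freetarg == 85 and free == 60, every
	 * swap page already allocated (swpginuse >= swpgavail), swap not
	 * completely full, and nothing freed by the inactive scan above,
	 * swap_shortage == 25: the loop below will strip swap space from
	 * up to 25 active pages so it can be reused for pageouts.
	 */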

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
		    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */

		if (p->uobject != NULL) {
			uobj = p->uobject;
			slock = &uobj->vmobjlock;
			if (!simple_lock_try(slock)) {
				continue;
			}
		} else {
			anon = p->uanon;
			KASSERT(anon != NULL);
			slock = &anon->an_lock;
			if (!simple_lock_try(slock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			simple_unlock(slock);
			continue;
		}

#if defined(VMSWAP)
		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}
#endif /* defined(VMSWAP) */

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		simple_unlock(slock);
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

boolean_t
uvm_reclaimable(void)
{
	int filepages;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return TRUE;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory, e.g. pools and
	 * XXX the traditional buffer cache.
	 */

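	/*
	 * Worked numbers (illustrative only, assuming 4 KiB pages,
	 * PAGE_SHIFT == 12): 5MB >> PAGE_SHIFT is 1280 pages, so with
	 * 100000 active+inactive pages the threshold is
	 * MIN(100000 >> 4, 1280) == MIN(6250, 1280) == 1280 pages of
	 * file-backed memory.
	 */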
	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	if (filepages >= MIN((uvmexp.active + uvmexp.inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return TRUE;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return FALSE;
}