/*	$NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_inactive(struct pglist *);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */
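
/*
 * a typical caller (a hedged sketch, not code from this file): an
 * allocator that comes up empty can wake the daemon and retry, e.g.
 *
 *	while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
 *		uvm_wait("pgalloc");
 */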

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
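		/*
		 * for example (illustrative only, hz varies by platform):
		 * with hz = 100 this is a 12-tick, roughly 1/8 second,
		 * pause before the sleep below returns and we retry.
		 */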
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;
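
	/*
	 * a worked example (illustrative numbers, assuming 4k pages and
	 * a small reserve_kernel): with npages = 65536 (256MB of RAM),
	 * npages / 20 = 3276 is clamped to the 256k ceiling of 64 pages,
	 * so freemin = 64 and freetarg = (64 * 4) / 3 = 85, before the
	 * uvm_extrapages adjustment below.
	 */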

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmexp.inactarg = UVM_PCTPARAM_APPLY(&uvmexp.inactivepct,
		    uvmexp.active + uvmexp.inactive);
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}
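
		/*
		 * e.g. (illustrative figures, inactivepct is a tunable)
		 * with inactivepct = 33 and 30000 active+inactive pages,
		 * the target is about 9900 inactive pages, bumped to
		 * freetarg + 1 if that is larger.
		 */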

		/*
		 * estimate how much buffer memory to ask back (the hint
		 * passed to buf_drain() below).  note that bufmem is
		 * returned to the system only when an entire pool page
		 * is empty.
		 */
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;
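
		/*
		 * e.g. with freetarg = 256 and free = 200 (illustrative
		 * numbers), bufcnt is 56 pages, so buf_drain() below is
		 * asked for 56 << PAGE_SHIFT bytes: 224k with 4k pages.
		 */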

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(TRUE);

	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we stop scanning early once we have met our free target (or have
 *    reactivated UVMPD_NUMDIRTYREACTS dirty pages)
 */

static void
uvmpd_scan_inactive(struct pglist *pglst)
{
	struct vm_page *p, *nextpg = NULL; /* Quell compiler warning */
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct vm_page *swpps[round_page(MAXPHYS) >> PAGE_SHIFT];
	int error;
	int result;
#endif /* defined(VMSWAP) */
	struct simplelock *slock;
	int swnpages, swcpages;
	int swslot;
	int dirtyreacts, t;
	boolean_t anonunder, fileunder, execunder;
	boolean_t anonover, fileover, execover;
	boolean_t anonreact, filereact, execreact;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	dirtyreacts = 0;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	t = uvmexp.active + uvmexp.inactive + uvmexp.free;
	anonunder = (uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8);
	fileunder = (uvmexp.filepages <= (t * uvmexp.filemin) >> 8);
	execunder = (uvmexp.execpages <= (t * uvmexp.execmin) >> 8);
	anonover = uvmexp.anonpages > ((t * uvmexp.anonmax) >> 8);
	fileover = uvmexp.filepages > ((t * uvmexp.filemax) >> 8);
	execover = uvmexp.execpages > ((t * uvmexp.execmax) >> 8);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = FALSE;
	}
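
	/*
	 * a worked example of the threshold arithmetic (illustrative
	 * numbers only): the {anon,file,exec}{min,max} values are applied
	 * in units of 1/256 of t, so with t = 25600 pages and
	 * anonmin = 26, anon pages count as "under" while anonpages <=
	 * (25600 * 26) >> 8 = 2600 pages, i.e. roughly 10% of t.
	 */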
#if !defined(VMSWAP)
	/*
	 * XXX no point in putting swap-backed pages on the page queue.
	 */

	anonreact = TRUE;
#endif /* !defined(VMSWAP) */
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		uobj = NULL;
		anon = NULL;
		if (p) {

			/*
			 * see if we've met the free target.
			 */

			if (uvmexp.free + uvmexp.paging >=
			    uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				UVMHIST_LOG(pdhist,"  met free target: "
					    "exit loop", 0, 0, 0, 0);

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
				nextpg = NULL;
			}
		}
		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page.
			 */

			if (pmap_is_referenced(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
			anon = p->uanon;
			uobj = p->uobject;

			/*
			 * enforce the minimum thresholds on different
			 * types of memory usage.  if reusing the current
			 * page would reduce that type of usage below its
			 * minimum, reactivate the page instead and move
			 * on to the next page.
			 */

			if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
				uvm_pageactivate(p);
				uvmexp.pdreexec++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
			    !UVM_OBJ_IS_VTEXT(uobj) && filereact) {
				uvm_pageactivate(p);
				uvmexp.pdrefile++;
				continue;
			}
			if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && anonreact) {
				uvm_pageactivate(p);
				uvmexp.pdreanon++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* does the page belong to an object? */
			if (uobj != NULL) {
				slock = &uobj->vmobjlock;
				if (!simple_lock_try(slock)) {
					continue;
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdobscan++;
			} else {
#if defined(VMSWAP)
				KASSERT(anon != NULL);
				slock = &anon->an_lock;
				if (!simple_lock_try(slock)) {
					continue;
				}

				/*
				 * set PQ_ANON if it isn't set already.
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
				panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
			}


			/*
			 * we now have the object and the page queues locked.
			 * if the page is not swap-backed, call the object's
			 * pager to flush and free the page.
			 */

#if defined(READAHEAD_STATS)
			if ((p->flags & PG_SPECULATIVE) != 0) {
				p->flags &= ~PG_SPECULATIVE;
				uvm_ra_miss.ev_count++;
			}
#endif /* defined(READAHEAD_STATS) */

			if ((p->pqflags & PQ_SWAPBACKED) == 0) {
				uvm_unlock_pageq();
				(void) (uobj->pgops->pgo_put)(uobj, p->offset,
				    p->offset + PAGE_SIZE,
				    PGO_CLEANIT|PGO_FREE);
				uvm_lock_pageq();
				if (nextpg &&
				    (nextpg->pqflags & PQ_INACTIVE) == 0) {
					nextpg = TAILQ_FIRST(pglst);
				}
				continue;
			}

#if defined(VMSWAP)
			/*
			 * the page is swap-backed.  remove all the permissions
			 * from the page so we can sync the modified info
			 * without any race conditions.  if the page is clean
			 * we can free it now and continue.
			 */

			pmap_page_protect(p, VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
				p->flags &= ~(PG_CLEAN);
			}
			if (p->flags & PG_CLEAN) {
				int slot;
				int pageidx;

				pageidx = p->offset >> PAGE_SHIFT;
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				/*
				 * for anons, we need to remove the page
				 * from the anon ourselves.  for aobjs,
				 * pagefree did that for us.
				 */

				if (anon) {
					KASSERT(anon->an_swslot != 0);
					anon->an_page = NULL;
					slot = anon->an_swslot;
				} else {
					slot = uao_find_swslot(uobj, pageidx);
				}
				simple_unlock(slock);

				if (slot > 0) {
					/* this page is now only in swap. */
					simple_lock(&uvm.swap_data_lock);
					KASSERT(uvmexp.swpgonly <
						uvmexp.swpginuse);
					uvmexp.swpgonly++;
					simple_unlock(&uvm.swap_data_lock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (uvmexp.free + uvmexp.paging >
			    uvmexp.freetarg << 2) {
				simple_unlock(slock);
				continue;
			}

			/*
			 * free any swap space allocated to the page since
			 * we'll have to write it again with its new data.
			 */

			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
			} else if (p->pqflags & PQ_AOBJ) {
				uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
			}

			/*
			 * if all pages in swap are only in swap,
			 * the swap space is full and we can't page out
			 * any more swap-backed pages.  reactivate this page
			 * so that we eventually cycle all pages through
			 * the inactive queue.
			 */

			if (uvm_swapisfull()) {
				dirtyreacts++;
				uvm_pageactivate(p);
				simple_unlock(slock);
				continue;
			}

			/*
			 * start new swap pageout cluster (if necessary).
			 */

			if (swslot == 0) {
				/* Even with an odd MAXPHYS, the shift
				   implicitly rounds down to a whole
				   number of pages. */
				swnpages = MAXPHYS >> PAGE_SHIFT;
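				/*
				 * e.g. a MAXPHYS of 64k with 4k pages
				 * gives a 16-page cluster (illustrative
				 * values; both are machine-dependent).
				 */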
				swslot = uvm_swap_alloc(&swnpages, TRUE);
				if (swslot == 0) {
					simple_unlock(slock);
					continue;
				}
				swcpages = 0;
			}

			/*
			 * at this point, we're definitely going to reuse this
			 * page.  mark the page busy and delayed-free.
			 * we should remove the page from the page queues
			 * so we don't ever look at it again.
			 * adjust counters and such.
			 */

			p->flags |= PG_BUSY;
			UVM_PAGE_OWN(p, "scan_inactive");

			p->flags |= PG_PAGEOUT;
			uvmexp.paging++;
			uvm_pagedequeue(p);

			uvmexp.pgswapout++;

			/*
			 * add the new page to the cluster.
			 */

			if (anon) {
				anon->an_swslot = swslot + swcpages;
				simple_unlock(slock);
			} else {
				result = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, swslot + swcpages);
				if (result == -1) {
					p->flags &= ~(PG_BUSY|PG_PAGEOUT);
					UVM_PAGE_OWN(p, NULL);
					uvmexp.paging--;
					uvm_pageactivate(p);
					simple_unlock(slock);
					continue;
				}
				simple_unlock(slock);
			}
			swpps[swcpages] = p;
			swcpages++;

			/*
			 * if the cluster isn't full, look for more pages
			 * before starting the i/o.
			 */

			if (swcpages < swnpages) {
				continue;
			}
#else /* defined(VMSWAP) */
			panic("%s: swap-backed", __func__);
#endif /* defined(VMSWAP) */

		}

#if defined(VMSWAP)
		/*
		 * if this is the final pageout we could have a few
		 * unused swap blocks.  if so, free them now.
		 */

		if (swcpages < swnpages) {
			uvm_swap_free(swslot + swcpages, (swnpages - swcpages));
		}

		/*
		 * now start the pageout.
		 */

		uvm_unlock_pageq();
		uvmexp.pdpageouts++;
		error = uvm_swap_put(swslot, swpps, swcpages, 0);
		KASSERT(error == 0);
		uvm_lock_pageq();

		/*
		 * zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		swslot = 0;

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}
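
	/*
	 * e.g. if freetarg = 256 and free = 200 (illustrative numbers)
	 * while all allocated swap is in use and the inactive scan freed
	 * nothing, swap_shortage is 56, so the loop below may strip the
	 * swap space of up to 56 active pages to unblock pageout.
	 */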

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
		    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */

		if (p->uobject != NULL) {
			uobj = p->uobject;
			slock = &uobj->vmobjlock;
			if (!simple_lock_try(slock)) {
				continue;
			}
		} else {
			anon = p->uanon;
			KASSERT(anon != NULL);
			slock = &anon->an_lock;
			if (!simple_lock_try(slock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			simple_unlock(slock);
			continue;
		}

#if defined(VMSWAP)
		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}
#endif /* defined(VMSWAP) */

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			pmap_clear_reference(p);
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		simple_unlock(slock);
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

boolean_t
uvm_reclaimable(void)
{
	int filepages;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return TRUE;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB
	 * (whichever is smaller), try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
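
	/*
	 * e.g. with 4k pages the 5MB term is 1280 pages, so on a system
	 * with 64000 active+inactive pages the effective threshold is
	 * MIN(64000 >> 4, 1280) = 1280 pages of file/exec memory.
	 */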
	if (filepages >= MIN((uvmexp.active + uvmexp.inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return TRUE;
	}

	/*
	 * kill the process, fail allocation, etc.
	 */

	return FALSE;
}