/*	$NetBSD: uvm_pdaemon.c,v 1.88 2007/11/07 00:23:46 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.88 2007/11/07 00:23:46 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	mutex_enter(&uvm_pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	mtsleep(&uvmexp.free, PVM, wmsg, timo, &uvm_pagedaemon_lock);
	mutex_exit(&uvm_pagedaemon_lock);

	splx(s);
}
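
/*
 * A minimal sketch of the expected caller pattern (illustrative only;
 * the retry loop below is hypothetical, although uvm_pagealloc()
 * itself is the real allocator, and "pgalloc" is just an example
 * wait message):
 *
 *	struct vm_page *pg;
 *
 *	for (;;) {
 *		pg = uvm_pagealloc(NULL, 0, NULL, 0);
 *		if (pg != NULL)
 *			break;
 *		uvm_wait("pgalloc");
 *	}
 *
 * the caller must hold no locks across uvm_wait(), and the pagedaemon
 * itself must never end up here (see the deadlock check above).
 */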

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 */

void
uvm_kick_pdaemon(void)
{

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p())) {
		wakeup(&uvm.pagedaemon);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
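
/*
 * Worked example of the tuning above (illustrative, assuming 4KB pages,
 * i.e. PAGE_SHIFT == 12): with npages == 16384 (64MB of RAM),
 * npages / 20 == 819, which the MIN() clamp reduces to the 256KB
 * ceiling of (256*1024) >> 12 == 64 pages, so freemin == 64.
 * freetarg then becomes (64 * 4) / 3 == 85 pages and wiredmax becomes
 * 16384 / 3 == 5461 pages.  the reserve_kernel check can only raise
 * freemin further.
 */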

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	struct pool *pp;
	uint64_t where;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		mutex_enter(&uvm_pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		mtsleep(&uvm.pagedaemon, PVM | PNORELOCK, "pgdaemon", 0,
		    &uvm_pagedaemon_lock);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that buffer memory is returned
		 * to the system only when an entire pool page is empty.
		 */
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmpdpol_needsscan_p()) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(true);

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	int free;
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	free = uvmexp.free;
	(*bp->b_iodone)(bp);
	if (free <= uvmexp.reserve_kernel) {
		mutex_spin_enter(&uvm_fpageqlock);
		wakeup(&uvm.pagedaemon);
		mutex_spin_exit(&uvm_fpageqlock);
	} else {
		mutex_enter(&uvm_pagedaemon_lock);
		wakeup(&uvmexp.free);
		mutex_exit(&uvm_pagedaemon_lock);
	}
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked simplelock on success.  otherwise, return NULL.
 */

struct simplelock *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct simplelock *slock;

	UVM_LOCK_ASSERT_PAGEQ();
	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!simple_lock_try(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}
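
/*
 * A condensed sketch of the caller pattern this function expects
 * (uvmpd_trydropswap() below is a real instance; "work on page" is
 * only a placeholder):
 *
 *	struct simplelock *slock;
 *
 *	slock = uvmpd_trylockowner(pg);
 *	if (slock == NULL)
 *		return;			(owner lock contended; skip page)
 *	if ((pg->flags & PG_BUSY) != 0) {
 *		simple_unlock(slock);
 *		return;			(page is busy; skip it)
 *	}
 *	... work on page ...
 *	simple_unlock(slock);
 */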

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		LOCK_ASSERT(simple_lock_held(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	uvmexp.pdpageouts++;
	error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
	KASSERT(error == 0);

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
}
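
/*
 * The swapcluster functions above form a small pipeline; a condensed,
 * illustrative sketch of the lifecycle as driven by uvmpd_scan_queue()
 * below (locking and error details elided):
 *
 *	struct swapcluster swc;
 *
 *	swapcluster_init(&swc);
 *	while ( ... more dirty swap-backed pages ... ) {
 *		if (swapcluster_allocslots(&swc) != 0)
 *			continue;	(no swap space; reactivate page)
 *		if (swapcluster_add(&swc, pg) != 0)
 *			continue;	(slot bookkeeping failed; reactivate)
 *		swapcluster_flush(&swc, false);	(writes only a full cluster)
 *	}
 *	swapcluster_flush(&swc, true);	(final flush; frees unused slots)
 */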

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	struct simplelock *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		simple_unlock(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	simple_unlock(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan the replacement-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swc.swc_slot is non-zero if we are building a swap cluster.  we
	 * want to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			continue;
		}
		if (p->flags & PG_BUSY) {
			simple_unlock(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			uvm_unlock_pageq();
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			uvm_lock_pageq();
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			simple_unlock(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			simple_unlock(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * if all pages in swap are only in swap,
		 * the swap space is full and we can't page out
		 * any more swap-backed pages.  reactivate this page
		 * so that we eventually cycle all pages through
		 * the inactive queue.
		 */

		if (uvm_swapisfull()) {
			dirtyreacts++;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}

		/*
		 * start new swap pageout cluster (if necessary).
		 */

		if (swapcluster_allocslots(&swc)) {
			simple_unlock(slock);
			dirtyreacts++; /* XXX */
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvmexp.paging++;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		uvm_unlock_pageq();

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			uvm_lock_pageq();
			uvmexp.paging--;
			dirtyreacts++;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}
		simple_unlock(slock);

		swapcluster_flush(&swc, false);
		uvm_lock_pageq();

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		simple_unlock(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	uvm_unlock_pageq();
	swapcluster_flush(&swc, true);
	uvm_lock_pageq();
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0 &&
	    uvm.swapout_enabled) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems to be worth doing uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}
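
/*
 * Worked example of the threshold above (illustrative, assuming 4KB
 * pages): 5 * 1024 * 1024 >> 12 == 1280 pages, so with 100000 pageable
 * pages the 1/16 share would be 6250 and MIN() picks 1280; the system
 * is deemed reclaimable while at least 1280 file-backed pages remain.
 */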

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}