/*	$NetBSD: uvm_pdaemon.c,v 1.97 2008/12/13 11:26:57 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.97 2008/12/13 11:26:57 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

#define	UVMPD_NUMTRYLOCKOWNER	16

/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);

unsigned int uvm_pagedaemon_waiters;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}
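
/*
 * Illustrative sketch (not part of this file): a typical caller sits in
 * an allocation retry loop, giving up once uvm_reclaimable() says that
 * waiting for the pagedaemon is pointless, e.g.
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		if (!uvm_reclaimable())
 *			return ENOMEM;
 *		uvm_wait("pgalloc");
 *	}
 */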

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

	KASSERT(mutex_owned(&uvm_fpageqlock));

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p())) {
		wakeup(&uvm.pagedaemon);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	int val;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	/*
	 * try to keep 0.5% of available RAM free, but limit to between
	 * 128k and 1024k per-CPU.  XXX: what are these values good for?
	 */
	val = uvmexp.npages / 200;
	val = MAX(val, (128*1024) >> PAGE_SHIFT);
	val = MIN(val, (1024*1024) >> PAGE_SHIFT);
	val *= ncpu;

	/* Make sure there's always a user page free. */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* Calculate free target. */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);
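
	/*
	 * Worked example with hypothetical numbers: on a 2-CPU machine
	 * with 4 KiB pages and 1 GiB of RAM, npages / 200 = 1310 pages
	 * is clamped to the 1024k cap of 256 pages, so freemin becomes
	 * 256 * 2 = 512 pages (2 MiB), and freetarg becomes
	 * 512 * 4 / 3 = 682 pages plus any uvm_extrapages carried over.
	 */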

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	struct pool *pp;
	uint64_t where;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan, needsfree;

		mutex_spin_enter(&uvm_fpageqlock);
		if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			mutex_spin_enter(&uvm_fpageqlock);
			uvmpd_tune();
			mutex_spin_exit(&uvm_fpageqlock);
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that bufmem is returned to the
		 * system only when an entire pool page is empty.
		 */
		mutex_spin_enter(&uvm_fpageqlock);
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;
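
		/*
		 * Sketch with made-up numbers: if freetarg is 1024 pages
		 * and only 900 are free, bufcnt is 124 pages, so the
		 * buf_drain() call further down asks the buffer cache to
		 * give back up to 124 << PAGE_SHIFT bytes.
		 */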

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
		needsscan = needsfree || uvmpdpol_needsscan_p();

		/*
		 * scan if needed
		 */
		if (needsscan) {
			mutex_spin_exit(&uvm_fpageqlock);
			uvmpd_scan();
			mutex_spin_enter(&uvm_fpageqlock);
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
		}
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * if we don't need free memory, we're done.
		 */

		if (!needsfree)
			continue;

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

void
uvm_pageout_start(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	uvmexp.paging += npages;
	mutex_spin_exit(&uvm_fpageqlock);
}

void
uvm_pageout_done(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	kmutex_t *slock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!mutex_tryenter(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};
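
/*
 * Sizing note: swc_pages holds one cluster's worth of pages for a
 * single swap I/O.  With a typical MAXPHYS of 64 KiB and a
 * MIN_PAGE_SIZE of 4 KiB, for instance, howmany(MAXPHYS, MIN_PAGE_SIZE)
 * leaves room for 16 pages.
 */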

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/*
	 * Even with strange MAXPHYS, the shift implicitly
	 * rounds down to a page.
	 */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		uvmexp.pdpageouts++;
		uvm_pageout_start(nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replacement-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	int lockownerfail;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	lockownerfail = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}
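
		/*
		 * Note on the test above: "uvmexp.freetarg << 2" keeps us
		 * scanning until free memory, pages already being paged
		 * out, and pages queued in the pending swap cluster
		 * together reach four times the free target; e.g. with
		 * freetarg = 256 pages we stop once that sum hits 1024.
		 */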

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			/*
			 * yield the cpu to give the LWP holding the lock
			 * a chance to run.  otherwise we can busy-loop
			 * too long if the page queue is filled with a lot
			 * of pages from few objects.
			 */
			lockownerfail++;
			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
				mutex_exit(&uvm_pageqlock);
				/* XXX Better than yielding but inadequate. */
				kpause("livelock", false, 1, NULL);
				mutex_enter(&uvm_pageqlock);
				lockownerfail = 0;
			}
			continue;
		}
		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			mutex_exit(&uvm_pageqlock);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			mutex_enter(&uvm_pageqlock);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			mutex_exit(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		mutex_exit(&uvm_pageqlock);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			mutex_enter(&uvm_pageqlock);
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		swapcluster_flush(&swc, false);
		mutex_enter(&uvm_pageqlock);

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	mutex_exit(&uvm_pageqlock);
	swapcluster_flush(&swc, true);
	mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

	/*
	 * work on meeting our targets.   first we work on our free target
	 * by converting inactive pages into free pages.  then we work on
	 * meeting our inactive target by converting active pages to
	 * inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);
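
	/*
	 * Illustrative arithmetic: with freetarg = 1024 pages, free = 900,
	 * every swap page allocated but swap not yet "full", and a scan
	 * that freed nothing, swap_shortage is 124, so the policy code is
	 * asked to recover swap space from roughly that many active pages
	 * while it balances the queues.
	 */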

	/*
	 * swap out some processes if we are still below the minimum
	 * free target.  we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freemin && uvmexp.nswapdev != 0 &&
	    uvm.swapout_enabled) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < min %d: swapout",
		    uvmexp.free, uvmexp.freemin, 0, 0);
		mutex_exit(&uvm_pageqlock);
		uvm_swapout_threads();
		mutex_enter(&uvm_pageqlock);
	}

	/*
	 * if still below the minimum target, try unloading kernel
	 * modules.
	 */

	if (uvmexp.free < uvmexp.freemin) {
		module_thread_kick();
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}
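
	/*
	 * Worked example (hypothetical numbers): with 4 KiB pages the 5MB
	 * term above is 1280 pages, so on a system with 65536 pageable
	 * pages the test is filepages >= MIN(65536 >> 4, 1280) =
	 * MIN(4096, 1280) = 1280 pages.
	 */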

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}