/*	$OpenBSD: uvm_pdaemon.c,v 1.93 2021/06/29 01:46:35 jsg Exp $	*/
/*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/atomic.h>

#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif

#include <uvm/uvm.h>

#include "drm.h"

#if NDRM > 0
extern void drmbackoff(long);
#endif

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

void		uvmpd_scan(void);
boolean_t	uvmpd_scan_inactive(struct pglist *);
void		uvmpd_tune(void);
void		uvmpd_drop(struct pglist *);

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	uint64_t timo = INFSLP;

#ifdef DIAGNOSTIC
	if (curproc == &proc0)
		panic("%s: cannot sleep for memory during boot", __func__);
#endif

	/* check for page daemon going to sleep (waiting for itself) */
	if (curproc == uvm.pagedaemon_proc) {
		printf("uvm_wait emergency bufbackoff\n");
		if (bufbackoff(NULL, 4) == 0)
			return;
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = MSEC_TO_NSEC(125);	/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

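	/*
	 * wake the daemon and sleep until it frees some pages.  PNORELOCK
	 * tells msleep_nsec() not to re-acquire fpageqlock on wakeup, so
	 * we return with no locks held.
	 */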
	uvm_lock_fpageq();
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	msleep_nsec(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{

	uvmexp.freemin = uvmexp.npages / 30;

	/* between 16k and 512k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
#if 0
	uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
#endif

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
}
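
/*
 * Illustrative numbers (editor's example, assuming 4 KB pages and 1 GB
 * of managed memory, i.e. npages == 262144):
 *	freemin  = 262144 / 30    = 8738 pages  (~34 MB)
 *	freetarg = (8738 * 4) / 3 = 11650 pages (~45 MB)
 *	wiredmax = 262144 / 3     = 87381 pages
 */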

/*
 * Indicate to the page daemon that a nowait call failed and it should
 * recover at least some memory in the most restricted region (assumed
 * to be dma_constraint).
 */
volatile int uvm_nowait_failed;

/*
 * uvm_pageout: the main loop for the pagedaemon
 */
void
uvm_pageout(void *arg)
{
	struct uvm_constraint_range constraint;
	struct uvm_pmalloc *pma;
	int npages = 0;

	/* ensure correct priority and set paging parameters... */
	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	for (;;) {
		long size;

		uvm_lock_fpageq();
		if (!uvm_nowait_failed && TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
			msleep_nsec(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
			    "pgdaemon", INFSLP);
			uvmexp.pdwoke++;
		}

		if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
			pma->pm_flags |= UVM_PMA_BUSY;
			constraint = pma->pm_constraint;
		} else {
			if (uvm_nowait_failed) {
				/*
				 * XXX realistically, this is what our
				 * nowait callers probably care about
				 */
				constraint = dma_constraint;
				uvm_nowait_failed = 0;
			} else
				constraint = no_constraint;
		}

		uvm_unlock_fpageq();

		/* now lock page queues and recompute inactive count */
		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		/* Reclaim pages from the buffer cache if possible. */
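		/*
		 * sizing note: the request is the pending pmalloc size (if
		 * any) plus our shortfall below freetarg; we then ask the
		 * buffer cache (and DRM, if present) for twice that amount.
		 */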
		size = 0;
		if (pma != NULL)
			size += pma->pm_size >> PAGE_SHIFT;
		if (uvmexp.free - BUFPAGES_DEFICIT < uvmexp.freetarg)
			size += uvmexp.freetarg - (uvmexp.free -
			    BUFPAGES_DEFICIT);
		if (size == 0)
			size = 16; /* XXX */
		uvm_unlock_pageq();
		(void) bufbackoff(&constraint, size * 2);
#if NDRM > 0
		drmbackoff(size * 2);
#endif
		uvm_lock_pageq();

		/* Scan if needed to meet our targets. */
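		/*
		 * (editor's reading of the macros, not original commentary:
		 * BUFPAGES_DEFICIT discounts pages the buffer cache still
		 * owes back, BUFPAGES_INACT credits clean buffer cache
		 * pages that are cheap to reclaim.)
		 */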
		if (pma != NULL ||
		    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
		    ((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		uvm_lock_fpageq();
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		if (pma != NULL) {
			/*
			 * XXX If UVM_PMA_FREED isn't set, no pages
			 * were freed.  Should we set UVM_PMA_FAIL in
			 * that case?
			 */
			pma->pm_flags &= ~UVM_PMA_BUSY;
			if (pma->pm_flags & UVM_PMA_FREED) {
				pma->pm_flags &= ~UVM_PMA_LINKED;
				TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
				    pmq);
				wakeup(pma);
			}
		}
		uvm_unlock_fpageq();

		/* scan done. unlock page queues (only lock we are holding) */
		uvm_unlock_pageq();

		sched_pause(yield);
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */
void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;

	uvm.aiodoned_proc = curproc;

	for (;;) {
		/*
		 * Check for done aio structures. If we've got structures to
		 * process, do so. Otherwise sleep while avoiding races.
		 */
		mtx_enter(&uvm.aiodoned_lock);
		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
			msleep_nsec(&uvm.aiodoned, &uvm.aiodoned_lock,
			    PVM, "aiodoned", INFSLP);
		/* Take the list for ourselves. */
		TAILQ_INIT(&uvm.aio_done);
		mtx_leave(&uvm.aiodoned_lock);

		/* process each i/o that's done. */
		free = uvmexp.free;
		while (bp != NULL) {
			if (bp->b_flags & B_PDAEMON) {
				uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT;
			}
			nbp = TAILQ_NEXT(bp, b_freelist);
			s = splbio();	/* b_iodone must be called at splbio */
			(*bp->b_iodone)(bp);
			splx(s);
			bp = nbp;

			sched_pause(yield);
		}
		uvm_lock_fpageq();
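		/*
		 * if memory was still critically low when we snapshotted
		 * "free" above (free <= reserve_kernel), kick the page
		 * daemon again; otherwise wake anyone sleeping in uvm_wait().
		 */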
		wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon :
		    &uvmexp.free);
		uvm_unlock_fpageq();
	}
}



/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

boolean_t
uvmpd_scan_inactive(struct pglist *pglst)
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
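	/*
	 * cluster size is MAXBSIZE >> PAGE_SHIFT pages; e.g. with the
	 * usual 64 KB MAXBSIZE and 4 KB pages that is 16 pages per swap
	 * cluster (editor's example; both constants are machine-dependent).
	 */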
	int swslot;
	struct vm_anon *anon;
	boolean_t swap_backed;
	vaddr_t start;
	int dirtyreacts;

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */
	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */
	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;
	dirtyreacts = 0;

	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */
		uobj = NULL;
		anon = NULL;

		if (p) {
			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */
			free = uvmexp.free - BUFPAGES_DEFICIT;

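			/*
			 * note: pages with pageouts already in flight
			 * (uvmexp.paging) count toward the goal, and the
			 * goal used here is four times freetarg
			 * (freetarg << 2).
			 */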
			if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				retval = TRUE;

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
			}
		}

		if (p) {	/* if (we have a new page to consider) */
			/*
			 * we are below target and have a new page to consider.
			 */
			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page (unlikely to happen since
			 * inactive pages shouldn't have any valid mappings
			 * and we cleared reference before deactivating).
			 */

			if (pmap_is_referenced(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}

			if (p->pg_flags & PQ_ANON) {
				anon = p->uanon;
				KASSERT(anon != NULL);
				if (rw_enter(anon->an_lock,
				    RW_WRITE|RW_NOSLEEP)) {
					/* lock failed, skip this page */
					continue;
				}
				if (p->pg_flags & PG_BUSY) {
					rw_exit(anon->an_lock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				uobj = p->uobject;
				KASSERT(uobj != NULL);
				if (p->pg_flags & PG_BUSY) {
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */
			if (p->pg_flags & PG_CLEAN) {
				if (p->pg_flags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					atomic_inc_int(&uvmexp.swpgonly);
				}

				/* zap all mappings with pmap_page_protect... */
				pmap_page_protect(p, PROT_NONE);
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {

					/*
					 * an anonymous page can only be clean
					 * if it has backing store assigned.
					 */

					KASSERT(anon->an_swslot != 0);

					/* remove from object */
					anon->an_page = NULL;
					rw_exit(anon->an_lock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */
			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
				if (anon) {
					rw_exit(anon->an_lock);
				}
				continue;
			}

			/*
			 * this page is dirty, but we can't page it out:
			 * swap is full and every allocated slot holds the
			 * only copy of its page, so no swap space can be
			 * reclaimed.  reactivate this page so that we
			 * eventually cycle all pages thru the inactive queue.
			 */
			if ((p->pg_flags & PQ_SWAPBACKED) && uvm_swapisfull()) {
				dirtyreacts++;
				uvm_pageactivate(p);
				if (anon) {
					rw_exit(anon->an_lock);
				}
				continue;
			}

			/*
			 * if the page is swap-backed and dirty and swap space
			 * is full, free any swap allocated to the page
			 * so that other pages can be paged out.
			 */
			KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
			if ((p->pg_flags & PQ_SWAPBACKED) &&
			    uvmexp.swpginuse == uvmexp.swpages) {

				if ((p->pg_flags & PQ_ANON) &&
				    p->uanon->an_swslot) {
					uvm_swap_free(p->uanon->an_swslot, 1);
					p->uanon->an_swslot = 0;
				}
				if (p->pg_flags & PQ_AOBJ) {
					uao_dropswap(p->uobject,
						     p->offset >> PAGE_SHIFT);
				}
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.   we write protect all the mappings
			 * of the page so that no one touches it while it is
			 * in I/O.
			 */

			swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0);
			atomic_setbits_int(&p->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(p, "scan_inactive");
			pmap_page_protect(p, PROT_READ);
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */
			if (swap_backed) {
				/* free old swap slot (if any) */
				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					uao_dropswap(uobj,
						     p->offset >> PAGE_SHIFT);
				}

				/* start new cluster (if necessary) */
				if (swslot == 0) {
					swnpages = MAXBSIZE >> PAGE_SHIFT;
					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);
					if (swslot == 0) {
						/* no swap?  give up! */
						atomic_clearbits_int(
						    &p->pg_flags,
						    PG_BUSY);
						UVM_PAGE_OWN(p, NULL);
						if (anon)
							rw_exit(anon->an_lock);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/* add block to cluster */
				swpps[swcpages] = p;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;
			}
		} else {
			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case swnpages == swcpages) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */
		if (swap_backed) {
			if (p) {	/* if we just added a page to cluster */
				if (anon)
					rw_exit(anon->an_lock);

				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}
		} else {
			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;
		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 *  IN: page queues locked
		 * OUT: page queues unlocked
		 */

		uvmexp.pdpageouts++;
		result = uvm_pager_put(swap_backed ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();
			uvmexp.pdpending++;
			if (p) {
				if (p->pg_flags & PQ_INACTIVE)
					nextpg = TAILQ_NEXT(p, pageq);
				else
					nextpg = TAILQ_FIRST(pglst);
			} else {
				nextpg = NULL;
			}
			continue;
		}
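
		/*
		 * (note: the uvmexp.paging increment above is balanced by
		 * the aiodone daemon, which subtracts the page count again
		 * when the async write completes with B_PDAEMON set.)
		 */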

		/* clean up "p" if we have one */
		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

			/* !swap_backed case: already locked... */
			if (swap_backed) {
				if (anon)
					rw_enter(anon->an_lock, RW_WRITE);
			}

#ifdef DIAGNOSTIC
			if (result == VM_PAGER_UNLOCK)
				panic("pagedaemon: pageout returned "
				    "invalid 'unlock' code");
#endif

			/* handle PG_WANTED now */
			if (p->pg_flags & PG_WANTED)
				wakeup(p);

			atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? Can only happen for anons */
			if (p->pg_flags & PG_RELEASED) {
				KASSERT(anon != NULL);
				/*
				 * remove page so we can get nextpg,
				 * also zero out anon so we don't use
				 * it after the free.
				 */
				anon->an_page = NULL;
				p->uanon = NULL;

				rw_exit(anon->an_lock);
				uvm_anfree(anon);	/* kills anon */
				pmap_page_protect(p, PROT_NONE);
				anon = NULL;
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				/* free released page */
				uvm_pagefree(p);
			} else {	/* page was not released during I/O */
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				if (result != VM_PAGER_OK) {
					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(p);
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */
				} else {
					/* pageout was a success... */
					pmap_clear_reference(p);
					pmap_clear_modify(p);
					atomic_setbits_int(&p->pg_flags,
					    PG_CLEAN);
				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */
			if (anon)
				rw_exit(anon->an_lock);

			if (nextpg && (nextpg->pg_flags & PQ_INACTIVE) == 0) {
				nextpg = TAILQ_FIRST(pglst);	/* reload! */
			}
		} else {
			/*
			 * if p is null in this loop, make sure it stays null
			 * in the next loop.
			 */
			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */
			uvm_lock_pageq();
		}
	}
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int free, inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;

	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);

	uvmexp.pdrevs++;		/* counter */
	uobj = NULL;

	/*
	 * get current "free" page count
	 */
	free = uvmexp.free - BUFPAGES_DEFICIT;

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	/*
	 * alternate starting queue between swap and object based on the
	 * low bit of uvmexp.pdrevs (which we bump by one each call).
	 */
	got_it = FALSE;
	pages_freed = uvmexp.pdfreed;	/* XXX - int */
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */
	inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
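	/*
	 * (swpginuse == swpages with !uvm_swapisfull() means every swap
	 * slot is allocated, yet some slots shadow pages still resident
	 * in memory; releasing those slots below lets pageouts proceed.)
	 */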
	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);

		/* skip this page if it's busy. */
		if (p->pg_flags & PG_BUSY)
			continue;

		if (p->pg_flags & PQ_ANON) {
			KASSERT(p->uanon != NULL);
			if (rw_enter(p->uanon->an_lock, RW_WRITE|RW_NOSLEEP))
				continue;
		} else
			KASSERT(p->uobject != NULL);

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */
		if (swap_shortage > 0) {
			if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
				swap_shortage--;
			}
			if (p->pg_flags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					atomic_clearbits_int(&p->pg_flags,
					    PG_CLEAN);
					swap_shortage--;
				}
			}
		}

		/*
		 * deactivate this page if there's a shortage of
		 * inactive pages.
		 */
		if (inactive_shortage > 0) {
			pmap_page_protect(p, PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
		if (p->pg_flags & PQ_ANON)
			rw_exit(p->uanon->an_lock);
	}
}

#ifdef HIBERNATE

/*
 * uvmpd_drop: drop clean pages from list
 */
void
uvmpd_drop(struct pglist *pglst)
{
	struct vm_page *p, *nextpg;

	for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);

		if (p->pg_flags & PQ_ANON || p->uobject == NULL)
			continue;

		if (p->pg_flags & PG_BUSY)
			continue;

		if (p->pg_flags & PG_CLEAN) {
			/*
			 * we now have the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */
			if (p->pg_flags & PQ_SWAPBACKED) {
				/* this page now lives only in swap */
				atomic_inc_int(&uvmexp.swpgonly);
			}

			/* zap all mappings with pmap_page_protect... */
			pmap_page_protect(p, PROT_NONE);
			uvm_pagefree(p);
		}
	}
}

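/*
 * uvmpd_hibernate: drop clean pages ahead of hibernation, presumably to
 * shrink the set of pages the hibernate image must preserve (editor's
 * reading; the original code carries no comment here).
 */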
void
uvmpd_hibernate(void)
{
	uvm_lock_pageq();

	uvmpd_drop(&uvm.page_inactive_swp);
	uvmpd_drop(&uvm.page_inactive_obj);
	uvmpd_drop(&uvm.page_active);

	uvm_unlock_pageq();
}

#endif