/*	$OpenBSD: uvm_pdaemon.c,v 1.88 2020/11/24 13:49:09 mpi Exp $	*/
/*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/atomic.h>

#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif

#include <uvm/uvm.h>

#if defined(__amd64__) || defined(__arm64__) || \
    defined(__i386__) || defined(__loongson__) || \
    defined(__macppc__) || defined(__sparc64__)
#include "drm.h"
#endif

#if NDRM > 0
extern void drmbackoff(long);
#endif

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS	16


/*
 * local prototypes
 */

void		uvmpd_scan(void);
boolean_t	uvmpd_scan_inactive(struct pglist *);
void		uvmpd_tune(void);
void		uvmpd_drop(struct pglist *);

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	uint64_t timo = INFSLP;

#ifdef DIAGNOSTIC
	if (curproc == &proc0)
		panic("%s: cannot sleep for memory during boot", __func__);
#endif

	/* check for page daemon going to sleep (waiting for itself) */
	if (curproc == uvm.pagedaemon_proc) {
		printf("uvm_wait emergency bufbackoff\n");
		if (bufbackoff(NULL, 4) == 0)
			return;
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = MSEC_TO_NSEC(125);	/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_lock_fpageq();
	wakeup(&uvm.pagedaemon);	/* wake the daemon! */
	msleep_nsec(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{

	uvmexp.freemin = uvmexp.npages / 30;

	/* between 16k and 512k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
#if 0
	uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
#endif

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
}
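
/*
 * A worked example of uvmpd_tune()'s arithmetic (illustrative numbers,
 * not from the original comments; assumes 4 KB pages, i.e.
 * PAGE_SHIFT == 12):  with 1 GB of RAM, npages == 262144, so
 * freemin = max(262144 / 30, (16*1024) >> 12) = 8738 pages (~34 MB)
 * and freetarg = (8738 * 4) / 3 = 11650 pages (~46 MB).  note that the
 * 16 KB floor amounts to only 4 pages, so in practice the
 * reserve_kernel + 1 clamp is the effective lower bound on freemin.
 */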

/*
 * Indicate to the page daemon that a nowait call failed and it should
 * recover at least some memory in the most restricted region (assumed
 * to be dma_constraint).
 */
volatile int uvm_nowait_failed;

/*
 * uvm_pageout: the main loop for the pagedaemon
 */
void
uvm_pageout(void *arg)
{
	struct uvm_constraint_range constraint;
	struct uvm_pmalloc *pma;
	int npages = 0;

	/* ensure correct priority and set paging parameters... */
	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	for (;;) {
		long size;

		uvm_lock_fpageq();
		if (!uvm_nowait_failed && TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
			msleep_nsec(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
			    "pgdaemon", INFSLP);
			uvmexp.pdwoke++;
		}

		if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
			pma->pm_flags |= UVM_PMA_BUSY;
			constraint = pma->pm_constraint;
		} else {
			if (uvm_nowait_failed) {
				/*
				 * XXX realistically, this is what our
				 * nowait callers probably care about
				 */
				constraint = dma_constraint;
				uvm_nowait_failed = 0;
			} else
				constraint = no_constraint;
		}

		uvm_unlock_fpageq();

		/* now lock page queues and recompute inactive count */
		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		/* Reclaim pages from the buffer cache if possible. */
		size = 0;
		if (pma != NULL)
			size += pma->pm_size >> PAGE_SHIFT;
		if (uvmexp.free - BUFPAGES_DEFICIT < uvmexp.freetarg)
			size += uvmexp.freetarg - (uvmexp.free -
			    BUFPAGES_DEFICIT);
		if (size == 0)
			size = 16;	/* XXX */
		uvm_unlock_pageq();
		(void) bufbackoff(&constraint, size * 2);
#if NDRM > 0
		drmbackoff(size * 2);
#endif
		uvm_lock_pageq();

		/* Scan if needed to meet our targets. */
		if (pma != NULL ||
		    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
		    ((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		uvm_lock_fpageq();
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		if (pma != NULL) {
			/*
			 * XXX If UVM_PMA_FREED isn't set, no pages
			 * were freed.  Should we set UVM_PMA_FAIL in
			 * that case?
			 */
			pma->pm_flags &= ~UVM_PMA_BUSY;
			if (pma->pm_flags & UVM_PMA_FREED) {
				pma->pm_flags &= ~UVM_PMA_LINKED;
				TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
				    pmq);
				wakeup(pma);
			}
		}
		uvm_unlock_fpageq();

		/* scan done.  unlock page queues (only lock we are holding) */
		uvm_unlock_pageq();

		sched_pause(yield);
	}
	/*NOTREACHED*/
}
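
/*
 * Summary of the uvm_pmalloc handshake driven by the loop above (a
 * reading of the code, not an authoritative spec):  a failing
 * constrained allocator queues a pma on uvm.pmr_control.allocs and
 * wakes the daemon; the daemon marks it UVM_PMA_BUSY while it works
 * on the pma's constraint range.  once pages in that range have been
 * freed, the pma gets flagged UVM_PMA_FREED (by the page-freeing
 * path, outside this file), at which point the loop above unlinks it
 * (clearing UVM_PMA_LINKED) and wakeup(pma) lets the waiter retry.
 */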
322 */ 323 void 324 uvm_aiodone_daemon(void *arg) 325 { 326 int s, free; 327 struct buf *bp, *nbp; 328 329 uvm.aiodoned_proc = curproc; 330 331 for (;;) { 332 /* 333 * Check for done aio structures. If we've got structures to 334 * process, do so. Otherwise sleep while avoiding races. 335 */ 336 mtx_enter(&uvm.aiodoned_lock); 337 while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) 338 msleep_nsec(&uvm.aiodoned, &uvm.aiodoned_lock, 339 PVM, "aiodoned", INFSLP); 340 /* Take the list for ourselves. */ 341 TAILQ_INIT(&uvm.aio_done); 342 mtx_leave(&uvm.aiodoned_lock); 343 344 /* process each i/o that's done. */ 345 free = uvmexp.free; 346 while (bp != NULL) { 347 if (bp->b_flags & B_PDAEMON) { 348 uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT; 349 } 350 nbp = TAILQ_NEXT(bp, b_freelist); 351 s = splbio(); /* b_iodone must by called at splbio */ 352 (*bp->b_iodone)(bp); 353 splx(s); 354 bp = nbp; 355 356 sched_pause(yield); 357 } 358 uvm_lock_fpageq(); 359 wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon : 360 &uvmexp.free); 361 uvm_unlock_fpageq(); 362 } 363 } 364 365 366 367 /* 368 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free. 369 * 370 * => called with page queues locked 371 * => we work on meeting our free target by converting inactive pages 372 * into free pages. 373 * => we handle the building of swap-backed clusters 374 * => we return TRUE if we are exiting because we met our target 375 */ 376 377 boolean_t 378 uvmpd_scan_inactive(struct pglist *pglst) 379 { 380 boolean_t retval = FALSE; /* assume we haven't hit target */ 381 int free, result; 382 struct vm_page *p, *nextpg; 383 struct uvm_object *uobj; 384 struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp; 385 int npages; 386 struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; /* XXX: see below */ 387 int swnpages, swcpages; /* XXX: see below */ 388 int swslot; 389 struct vm_anon *anon; 390 boolean_t swap_backed; 391 vaddr_t start; 392 int dirtyreacts; 393 394 /* 395 * note: we currently keep swap-backed pages on a separate inactive 396 * list from object-backed pages. however, merging the two lists 397 * back together again hasn't been ruled out. thus, we keep our 398 * swap cluster in "swpps" rather than in pps (allows us to mix 399 * clustering types in the event of a mixed inactive queue). 400 */ 401 /* 402 * swslot is non-zero if we are building a swap cluster. we want 403 * to stay in the loop while we have a page to scan or we have 404 * a swap-cluster to build. 405 */ 406 swslot = 0; 407 swnpages = swcpages = 0; 408 free = 0; 409 dirtyreacts = 0; 410 411 for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) { 412 /* 413 * note that p can be NULL iff we have traversed the whole 414 * list and need to do one final swap-backed clustered pageout. 415 */ 416 uobj = NULL; 417 anon = NULL; 418 419 if (p) { 420 /* 421 * update our copy of "free" and see if we've met 422 * our target 423 */ 424 free = uvmexp.free - BUFPAGES_DEFICIT; 425 426 if (free + uvmexp.paging >= uvmexp.freetarg << 2 || 427 dirtyreacts == UVMPD_NUMDIRTYREACTS) { 428 retval = TRUE; 429 430 if (swslot == 0) { 431 /* exit now if no swap-i/o pending */ 432 break; 433 } 434 435 /* set p to null to signal final swap i/o */ 436 p = NULL; 437 } 438 } 439 440 if (p) { /* if (we have a new page to consider) */ 441 /* 442 * we are below target and have a new page to consider. 
443 */ 444 uvmexp.pdscans++; 445 nextpg = TAILQ_NEXT(p, pageq); 446 447 /* 448 * move referenced pages back to active queue and 449 * skip to next page (unlikely to happen since 450 * inactive pages shouldn't have any valid mappings 451 * and we cleared reference before deactivating). 452 */ 453 454 if (pmap_is_referenced(p)) { 455 uvm_pageactivate(p); 456 uvmexp.pdreact++; 457 continue; 458 } 459 460 if (p->pg_flags & PQ_ANON) { 461 anon = p->uanon; 462 KASSERT(anon != NULL); 463 if (p->pg_flags & PG_BUSY) { 464 uvmexp.pdbusy++; 465 /* someone else owns page, skip it */ 466 continue; 467 } 468 uvmexp.pdanscan++; 469 } else { 470 uobj = p->uobject; 471 KASSERT(uobj != NULL); 472 if (p->pg_flags & PG_BUSY) { 473 uvmexp.pdbusy++; 474 /* someone else owns page, skip it */ 475 continue; 476 } 477 uvmexp.pdobscan++; 478 } 479 480 /* 481 * we now have the page queues locked. 482 * the page is not busy. if the page is clean we 483 * can free it now and continue. 484 */ 485 if (p->pg_flags & PG_CLEAN) { 486 if (p->pg_flags & PQ_SWAPBACKED) { 487 /* this page now lives only in swap */ 488 uvmexp.swpgonly++; 489 } 490 491 /* zap all mappings with pmap_page_protect... */ 492 pmap_page_protect(p, PROT_NONE); 493 uvm_pagefree(p); 494 uvmexp.pdfreed++; 495 496 if (anon) { 497 498 /* 499 * an anonymous page can only be clean 500 * if it has backing store assigned. 501 */ 502 503 KASSERT(anon->an_swslot != 0); 504 505 /* remove from object */ 506 anon->an_page = NULL; 507 } 508 continue; 509 } 510 511 /* 512 * this page is dirty, skip it if we'll have met our 513 * free target when all the current pageouts complete. 514 */ 515 if (free + uvmexp.paging > uvmexp.freetarg << 2) { 516 continue; 517 } 518 519 /* 520 * this page is dirty, but we can't page it out 521 * since all pages in swap are only in swap. 522 * reactivate it so that we eventually cycle 523 * all pages thru the inactive queue. 524 */ 525 if ((p->pg_flags & PQ_SWAPBACKED) && uvm_swapisfull()) { 526 dirtyreacts++; 527 uvm_pageactivate(p); 528 continue; 529 } 530 531 /* 532 * if the page is swap-backed and dirty and swap space 533 * is full, free any swap allocated to the page 534 * so that other pages can be paged out. 535 */ 536 KASSERT(uvmexp.swpginuse <= uvmexp.swpages); 537 if ((p->pg_flags & PQ_SWAPBACKED) && 538 uvmexp.swpginuse == uvmexp.swpages) { 539 540 if ((p->pg_flags & PQ_ANON) && 541 p->uanon->an_swslot) { 542 uvm_swap_free(p->uanon->an_swslot, 1); 543 p->uanon->an_swslot = 0; 544 } 545 if (p->pg_flags & PQ_AOBJ) { 546 uao_dropswap(p->uobject, 547 p->offset >> PAGE_SHIFT); 548 } 549 } 550 551 /* 552 * the page we are looking at is dirty. we must 553 * clean it before it can be freed. to do this we 554 * first mark the page busy so that no one else will 555 * touch the page. we write protect all the mappings 556 * of the page so that no one touches it while it is 557 * in I/O. 558 */ 559 560 swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0); 561 atomic_setbits_int(&p->pg_flags, PG_BUSY); 562 UVM_PAGE_OWN(p, "scan_inactive"); 563 pmap_page_protect(p, PROT_READ); 564 uvmexp.pgswapout++; 565 566 /* 567 * for swap-backed pages we need to (re)allocate 568 * swap space. 
569 */ 570 if (swap_backed) { 571 /* free old swap slot (if any) */ 572 if (anon) { 573 if (anon->an_swslot) { 574 uvm_swap_free(anon->an_swslot, 575 1); 576 anon->an_swslot = 0; 577 } 578 } else { 579 uao_dropswap(uobj, 580 p->offset >> PAGE_SHIFT); 581 } 582 583 /* start new cluster (if necessary) */ 584 if (swslot == 0) { 585 swnpages = MAXBSIZE >> PAGE_SHIFT; 586 swslot = uvm_swap_alloc(&swnpages, 587 TRUE); 588 if (swslot == 0) { 589 /* no swap? give up! */ 590 atomic_clearbits_int( 591 &p->pg_flags, 592 PG_BUSY); 593 UVM_PAGE_OWN(p, NULL); 594 continue; 595 } 596 swcpages = 0; /* cluster is empty */ 597 } 598 599 /* add block to cluster */ 600 swpps[swcpages] = p; 601 if (anon) 602 anon->an_swslot = swslot + swcpages; 603 else 604 uao_set_swslot(uobj, 605 p->offset >> PAGE_SHIFT, 606 swslot + swcpages); 607 swcpages++; 608 } 609 } else { 610 /* if p == NULL we must be doing a last swap i/o */ 611 swap_backed = TRUE; 612 } 613 614 /* 615 * now consider doing the pageout. 616 * 617 * for swap-backed pages, we do the pageout if we have either 618 * filled the cluster (in which case (swnpages == swcpages) or 619 * run out of pages (p == NULL). 620 * 621 * for object pages, we always do the pageout. 622 */ 623 if (swap_backed) { 624 if (p) { /* if we just added a page to cluster */ 625 /* cluster not full yet? */ 626 if (swcpages < swnpages) 627 continue; 628 } 629 630 /* starting I/O now... set up for it */ 631 npages = swcpages; 632 ppsp = swpps; 633 /* for swap-backed pages only */ 634 start = (vaddr_t) swslot; 635 636 /* if this is final pageout we could have a few 637 * extra swap blocks */ 638 if (swcpages < swnpages) { 639 uvm_swap_free(swslot + swcpages, 640 (swnpages - swcpages)); 641 } 642 } else { 643 /* normal object pageout */ 644 ppsp = pps; 645 npages = sizeof(pps) / sizeof(struct vm_page *); 646 /* not looked at because PGO_ALLPAGES is set */ 647 start = 0; 648 } 649 650 /* 651 * now do the pageout. 652 * 653 * for swap_backed pages we have already built the cluster. 654 * for !swap_backed pages, uvm_pager_put will call the object's 655 * "make put cluster" function to build a cluster on our behalf. 656 * 657 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct 658 * it to free the cluster pages for us on a successful I/O (it 659 * always does this for un-successful I/O requests). this 660 * allows us to do clustered pageout without having to deal 661 * with cluster pages at this level. 662 * 663 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST: 664 * IN: locked: page queues 665 * OUT: locked: 666 * !locked: pageqs 667 */ 668 669 uvmexp.pdpageouts++; 670 result = uvm_pager_put(swap_backed ? NULL : uobj, p, 671 &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0); 672 673 /* 674 * if we did i/o to swap, zero swslot to indicate that we are 675 * no longer building a swap-backed cluster. 676 */ 677 678 if (swap_backed) 679 swslot = 0; /* done with this cluster */ 680 681 /* 682 * first, we check for VM_PAGER_PEND which means that the 683 * async I/O is in progress and the async I/O done routine 684 * will clean up after us. in this case we move on to the 685 * next page. 686 * 687 * there is a very remote chance that the pending async i/o can 688 * finish _before_ we get here. if that happens, our page "p" 689 * may no longer be on the inactive queue. so we verify this 690 * when determining the next page (starting over at the head if 691 * we've lost our inactive page). 
692 */ 693 694 if (result == VM_PAGER_PEND) { 695 uvmexp.paging += npages; 696 uvm_lock_pageq(); 697 uvmexp.pdpending++; 698 if (p) { 699 if (p->pg_flags & PQ_INACTIVE) 700 nextpg = TAILQ_NEXT(p, pageq); 701 else 702 nextpg = TAILQ_FIRST(pglst); 703 } else { 704 nextpg = NULL; 705 } 706 continue; 707 } 708 709 /* clean up "p" if we have one */ 710 if (p) { 711 /* 712 * the I/O request to "p" is done and uvm_pager_put 713 * has freed any cluster pages it may have allocated 714 * during I/O. all that is left for us to do is 715 * clean up page "p" (which is still PG_BUSY). 716 * 717 * our result could be one of the following: 718 * VM_PAGER_OK: successful pageout 719 * 720 * VM_PAGER_AGAIN: tmp resource shortage, we skip 721 * to next page 722 * VM_PAGER_{FAIL,ERROR,BAD}: an error. we 723 * "reactivate" page to get it out of the way (it 724 * will eventually drift back into the inactive 725 * queue for a retry). 726 * VM_PAGER_UNLOCK: should never see this as it is 727 * only valid for "get" operations 728 */ 729 730 /* relock p's object: page queues not lock yet, so 731 * no need for "try" */ 732 733 #ifdef DIAGNOSTIC 734 if (result == VM_PAGER_UNLOCK) 735 panic("pagedaemon: pageout returned " 736 "invalid 'unlock' code"); 737 #endif 738 739 /* handle PG_WANTED now */ 740 if (p->pg_flags & PG_WANTED) 741 wakeup(p); 742 743 atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED); 744 UVM_PAGE_OWN(p, NULL); 745 746 /* released during I/O? Can only happen for anons */ 747 if (p->pg_flags & PG_RELEASED) { 748 KASSERT(anon != NULL); 749 /* 750 * remove page so we can get nextpg, 751 * also zero out anon so we don't use 752 * it after the free. 753 */ 754 anon->an_page = NULL; 755 p->uanon = NULL; 756 757 uvm_anfree(anon); /* kills anon */ 758 pmap_page_protect(p, PROT_NONE); 759 anon = NULL; 760 uvm_lock_pageq(); 761 nextpg = TAILQ_NEXT(p, pageq); 762 /* free released page */ 763 uvm_pagefree(p); 764 } else { /* page was not released during I/O */ 765 uvm_lock_pageq(); 766 nextpg = TAILQ_NEXT(p, pageq); 767 if (result != VM_PAGER_OK) { 768 /* pageout was a failure... */ 769 if (result != VM_PAGER_AGAIN) 770 uvm_pageactivate(p); 771 pmap_clear_reference(p); 772 /* XXXCDC: if (swap_backed) FREE p's 773 * swap block? */ 774 } else { 775 /* pageout was a success... */ 776 pmap_clear_reference(p); 777 pmap_clear_modify(p); 778 atomic_setbits_int(&p->pg_flags, 779 PG_CLEAN); 780 } 781 } 782 783 /* 784 * drop object lock (if there is an object left). do 785 * a safety check of nextpg to make sure it is on the 786 * inactive queue (it should be since PG_BUSY pages on 787 * the inactive queue can't be re-queued [note: not 788 * true for active queue]). 789 */ 790 791 if (nextpg && (nextpg->pg_flags & PQ_INACTIVE) == 0) { 792 nextpg = TAILQ_FIRST(pglst); /* reload! */ 793 } 794 } else { 795 /* 796 * if p is null in this loop, make sure it stays null 797 * in the next loop. 798 */ 799 nextpg = NULL; 800 801 /* 802 * lock page queues here just so they're always locked 803 * at the end of the loop. 804 */ 805 uvm_lock_pageq(); 806 } 807 } 808 return (retval); 809 } 810 811 /* 812 * uvmpd_scan: scan the page queues and attempt to meet our targets. 

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int free, inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;

	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);

	uvmexp.pdrevs++;	/* counter */
	uobj = NULL;

	/*
	 * get current "free" page count
	 */
	free = uvmexp.free - BUFPAGES_DEFICIT;

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.  first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	/*
	 * alternate starting queue between swap and object based on the
	 * low bit of uvmexp.pdrevs (which we bump by one each call).
	 */
	got_it = FALSE;
	pages_freed = uvmexp.pdfreed;	/* XXX - int */
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */
	inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	for (p = TAILQ_FIRST(&uvm.page_active);
	    p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	    p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);

		/* skip this page if it's busy. */
		if (p->pg_flags & PG_BUSY)
			continue;

		if (p->pg_flags & PQ_ANON)
			KASSERT(p->uanon != NULL);
		else
			KASSERT(p->uobject != NULL);

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */
		if (swap_shortage > 0) {
			if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
				swap_shortage--;
			}
			if (p->pg_flags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
				    p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					atomic_clearbits_int(&p->pg_flags,
					    PG_CLEAN);
					swap_shortage--;
				}
			}
		}
927 */ 928 if (inactive_shortage > 0) { 929 pmap_page_protect(p, PROT_NONE); 930 /* no need to check wire_count as pg is "active" */ 931 uvm_pagedeactivate(p); 932 uvmexp.pddeact++; 933 inactive_shortage--; 934 } 935 } 936 } 937 938 #ifdef HIBERNATE 939 940 /* 941 * uvmpd_drop: drop clean pages from list 942 */ 943 void 944 uvmpd_drop(struct pglist *pglst) 945 { 946 struct vm_page *p, *nextpg; 947 948 for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) { 949 nextpg = TAILQ_NEXT(p, pageq); 950 951 if (p->pg_flags & PQ_ANON || p->uobject == NULL) 952 continue; 953 954 if (p->pg_flags & PG_BUSY) 955 continue; 956 957 if (p->pg_flags & PG_CLEAN) { 958 /* 959 * we now have the page queues locked. 960 * the page is not busy. if the page is clean we 961 * can free it now and continue. 962 */ 963 if (p->pg_flags & PG_CLEAN) { 964 if (p->pg_flags & PQ_SWAPBACKED) { 965 /* this page now lives only in swap */ 966 uvmexp.swpgonly++; 967 } 968 969 /* zap all mappings with pmap_page_protect... */ 970 pmap_page_protect(p, PROT_NONE); 971 uvm_pagefree(p); 972 } 973 } 974 } 975 } 976 977 void 978 uvmpd_hibernate(void) 979 { 980 uvm_lock_pageq(); 981 982 uvmpd_drop(&uvm.page_inactive_swp); 983 uvmpd_drop(&uvm.page_inactive_obj); 984 uvmpd_drop(&uvm.page_active); 985 986 uvm_unlock_pageq(); 987 } 988 989 #endif 990