/*	$OpenBSD: uvm_pdaemon.c,v 1.82 2019/05/10 02:33:57 beck Exp $	*/
/*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/atomic.h>

#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS	16


/*
 * local prototypes
 */

void		uvmpd_scan(void);
boolean_t	uvmpd_scan_inactive(struct pglist *);
void		uvmpd_tune(void);
void		uvmpd_drop(struct pglist *);

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
        int timo = 0;

#ifdef DIAGNOSTIC
        if (curproc == &proc0)
                panic("%s: cannot sleep for memory during boot", __func__);
#endif

        /* check for page daemon going to sleep (waiting for itself) */
        if (curproc == uvm.pagedaemon_proc) {
                printf("uvm_wait emergency bufbackoff\n");
                if (bufbackoff(NULL, 4) == 0)
                        return;
                /*
                 * now we have a problem: the pagedaemon wants to go to
                 * sleep until it frees more memory.  but how can it
                 * free more memory if it is asleep?  that is a deadlock.
                 * we have two options:
                 *  [1] panic now
                 *  [2] put a timeout on the sleep, thus causing the
                 *      pagedaemon to only pause (rather than sleep forever)
                 *
                 * note that option [2] will only help us if we get lucky
                 * and some other process on the system breaks the deadlock
                 * by exiting or freeing memory (thus allowing the pagedaemon
                 * to continue).  for now we panic if DEBUG is defined,
                 * otherwise we hope for the best with option [2] (better
                 * yet, this should never happen in the first place!).
                 */

                printf("pagedaemon: deadlock detected!\n");
                timo = hz >> 3;         /* set timeout */
#if defined(DEBUG)
                /* DEBUG: panic so we can debug it */
                panic("pagedaemon deadlock");
#endif
        }

        uvm_lock_fpageq();
        wakeup(&uvm.pagedaemon);                /* wake the daemon! */
        msleep(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
}
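
/*
 * Illustrative note (not in the original source): a hypothetical caller
 * that cannot get a page would drop its own locks and then block in
 * uvm_wait() until the pagedaemon makes progress, e.g.
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		...release any locks we hold...
 *		uvm_wait("examplewt");	// wmsg appears as the wchan in ps/top
 *	}
 */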

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{

        uvmexp.freemin = uvmexp.npages / 30;

        /* between 16k and 512k */
        /* XXX:  what are these values good for? */
        uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
#if 0
        uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
#endif

        /* Make sure there's always a user page free. */
        if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
                uvmexp.freemin = uvmexp.reserve_kernel + 1;

        uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
        if (uvmexp.freetarg <= uvmexp.freemin)
                uvmexp.freetarg = uvmexp.freemin + 1;

        /* uvmexp.inactarg: computed in main daemon loop */

        uvmexp.wiredmax = uvmexp.npages / 3;
}

/*
 * Indicate to the page daemon that a nowait call failed and it should
 * recover at least some memory in the most restricted region (assumed
 * to be dma_constraint).
 */
volatile int uvm_nowait_failed;

/*
 * uvm_pageout: the main loop for the pagedaemon
 */
void
uvm_pageout(void *arg)
{
        struct uvm_constraint_range constraint;
        struct uvm_pmalloc *pma;
        int work_done;
        int npages = 0;

        /* ensure correct priority and set paging parameters... */
        uvm.pagedaemon_proc = curproc;
        (void) spl0();
        uvm_lock_pageq();
        npages = uvmexp.npages;
        uvmpd_tune();
        uvm_unlock_pageq();

        for (;;) {
                long size;
                work_done = 0; /* No work done this iteration. */

                uvm_lock_fpageq();
                if (!uvm_nowait_failed && TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
                        msleep(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
                            "pgdaemon", 0);
                        uvmexp.pdwoke++;
                }

                if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
                        pma->pm_flags |= UVM_PMA_BUSY;
                        constraint = pma->pm_constraint;
                } else {
                        if (uvm_nowait_failed) {
                                /*
                                 * XXX realistically, this is what our
                                 * nowait callers probably care about
                                 */
                                constraint = dma_constraint;
                                uvm_nowait_failed = 0;
                        } else
                                constraint = no_constraint;
                }

                uvm_unlock_fpageq();

                /* now lock page queues and recompute inactive count */
                uvm_lock_pageq();
                if (npages != uvmexp.npages) {  /* check for new pages? */
                        npages = uvmexp.npages;
                        uvmpd_tune();
                }

                uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
                if (uvmexp.inactarg <= uvmexp.freetarg) {
                        uvmexp.inactarg = uvmexp.freetarg + 1;
                }

                /* Reclaim pages from the buffer cache if possible. */
                size = 0;
                if (pma != NULL)
                        size += pma->pm_size >> PAGE_SHIFT;
                if (uvmexp.free - BUFPAGES_DEFICIT < uvmexp.freetarg)
                        size += uvmexp.freetarg - (uvmexp.free -
                            BUFPAGES_DEFICIT);
                if (size == 0)
                        size = 16; /* XXX */
                uvm_unlock_pageq();
                (void) bufbackoff(&constraint, size * 2);
                uvm_lock_pageq();

                /* Scan if needed to meet our targets. */
                if (pma != NULL ||
                    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
                    ((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
                        uvmpd_scan();
                        work_done = 1; /* XXX we hope... */
                }

                /*
                 * if there's any free memory to be had,
                 * wake up any waiters.
                 */
                uvm_lock_fpageq();
                if (uvmexp.free > uvmexp.reserve_kernel ||
                    uvmexp.paging == 0) {
                        wakeup(&uvmexp.free);
                }

                if (pma != NULL) {
                        pma->pm_flags &= ~UVM_PMA_BUSY;
                        if (!work_done)
                                pma->pm_flags |= UVM_PMA_FAIL;
                        if (pma->pm_flags & (UVM_PMA_FAIL | UVM_PMA_FREED)) {
                                pma->pm_flags &= ~UVM_PMA_LINKED;
                                TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
                                    pmq);
                        }
                        wakeup(pma);
                }
                uvm_unlock_fpageq();

                /* scan done.  unlock page queues (only lock we are holding) */
                uvm_unlock_pageq();

                sched_pause(yield);
        }
        /*NOTREACHED*/
}
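
/*
 * Illustrative note (not in the original source), with assumed numbers:
 * given 4KB pages and 512MB of managed memory, npages = 131072, so
 * uvmpd_tune() sets freemin = 131072 / 30 = 4369 pages (~17MB) and
 * freetarg = (4369 * 4) / 3 = 5825 pages.  each pass of the loop above
 * then recomputes inactarg = (active + inactive) / 3, clamped to at
 * least freetarg + 1, and scans only when the free or inactive counts
 * (adjusted for buffer cache pages) fall below those targets.
 */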

/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon.
 */
void
uvm_aiodone_daemon(void *arg)
{
        int s, free;
        struct buf *bp, *nbp;

        uvm.aiodoned_proc = curproc;

        for (;;) {
                /*
                 * Check for done aio structures. If we've got structures to
                 * process, do so. Otherwise sleep while avoiding races.
                 */
                mtx_enter(&uvm.aiodoned_lock);
                while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
                        msleep(&uvm.aiodoned, &uvm.aiodoned_lock,
                            PVM, "aiodoned", 0);
                /* Take the list for ourselves. */
                TAILQ_INIT(&uvm.aio_done);
                mtx_leave(&uvm.aiodoned_lock);

                /* process each i/o that's done. */
                free = uvmexp.free;
                while (bp != NULL) {
                        if (bp->b_flags & B_PDAEMON) {
                                uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT;
                        }
                        nbp = TAILQ_NEXT(bp, b_freelist);
                        s = splbio();   /* b_iodone must be called at splbio */
                        (*bp->b_iodone)(bp);
                        splx(s);
                        bp = nbp;

                        sched_pause(yield);
                }
                uvm_lock_fpageq();
                wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon :
                    &uvmexp.free);
                uvm_unlock_fpageq();
        }
}
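
/*
 * Illustrative note (not in the original source): the aiodone loop above
 * uses a common "steal the whole list" handoff so the mutex is never held
 * across the b_iodone callbacks.  A hypothetical minimal form:
 *
 *	mtx_enter(&lock);
 *	bp = TAILQ_FIRST(&queue);	// snapshot the head
 *	TAILQ_INIT(&queue);		// producers now see an empty queue
 *	mtx_leave(&lock);
 *	while (bp != NULL) {		// walk the snapshot unlocked
 *		nbp = TAILQ_NEXT(bp, b_freelist);
 *		...process bp...
 *		bp = nbp;
 *	}
 */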
430 */ 431 uvmexp.pdscans++; 432 nextpg = TAILQ_NEXT(p, pageq); 433 434 /* 435 * move referenced pages back to active queue and 436 * skip to next page (unlikely to happen since 437 * inactive pages shouldn't have any valid mappings 438 * and we cleared reference before deactivating). 439 */ 440 441 if (pmap_is_referenced(p)) { 442 uvm_pageactivate(p); 443 uvmexp.pdreact++; 444 continue; 445 } 446 447 if (p->pg_flags & PQ_ANON) { 448 anon = p->uanon; 449 KASSERT(anon != NULL); 450 if (p->pg_flags & PG_BUSY) { 451 uvmexp.pdbusy++; 452 /* someone else owns page, skip it */ 453 continue; 454 } 455 uvmexp.pdanscan++; 456 } else { 457 uobj = p->uobject; 458 KASSERT(uobj != NULL); 459 if (p->pg_flags & PG_BUSY) { 460 uvmexp.pdbusy++; 461 /* someone else owns page, skip it */ 462 continue; 463 } 464 uvmexp.pdobscan++; 465 } 466 467 /* 468 * we now have the page queues locked. 469 * the page is not busy. if the page is clean we 470 * can free it now and continue. 471 */ 472 if (p->pg_flags & PG_CLEAN) { 473 if (p->pg_flags & PQ_SWAPBACKED) { 474 /* this page now lives only in swap */ 475 uvmexp.swpgonly++; 476 } 477 478 /* zap all mappings with pmap_page_protect... */ 479 pmap_page_protect(p, PROT_NONE); 480 uvm_pagefree(p); 481 uvmexp.pdfreed++; 482 483 if (anon) { 484 485 /* 486 * an anonymous page can only be clean 487 * if it has backing store assigned. 488 */ 489 490 KASSERT(anon->an_swslot != 0); 491 492 /* remove from object */ 493 anon->an_page = NULL; 494 } 495 continue; 496 } 497 498 /* 499 * this page is dirty, skip it if we'll have met our 500 * free target when all the current pageouts complete. 501 */ 502 if (free + uvmexp.paging > uvmexp.freetarg << 2) { 503 continue; 504 } 505 506 /* 507 * this page is dirty, but we can't page it out 508 * since all pages in swap are only in swap. 509 * reactivate it so that we eventually cycle 510 * all pages thru the inactive queue. 511 */ 512 KASSERT(uvmexp.swpgonly <= uvmexp.swpages); 513 if ((p->pg_flags & PQ_SWAPBACKED) && 514 uvmexp.swpgonly == uvmexp.swpages) { 515 dirtyreacts++; 516 uvm_pageactivate(p); 517 continue; 518 } 519 520 /* 521 * if the page is swap-backed and dirty and swap space 522 * is full, free any swap allocated to the page 523 * so that other pages can be paged out. 524 */ 525 KASSERT(uvmexp.swpginuse <= uvmexp.swpages); 526 if ((p->pg_flags & PQ_SWAPBACKED) && 527 uvmexp.swpginuse == uvmexp.swpages) { 528 529 if ((p->pg_flags & PQ_ANON) && 530 p->uanon->an_swslot) { 531 uvm_swap_free(p->uanon->an_swslot, 1); 532 p->uanon->an_swslot = 0; 533 } 534 if (p->pg_flags & PQ_AOBJ) { 535 uao_dropswap(p->uobject, 536 p->offset >> PAGE_SHIFT); 537 } 538 } 539 540 /* 541 * the page we are looking at is dirty. we must 542 * clean it before it can be freed. to do this we 543 * first mark the page busy so that no one else will 544 * touch the page. we write protect all the mappings 545 * of the page so that no one touches it while it is 546 * in I/O. 547 */ 548 549 swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0); 550 atomic_setbits_int(&p->pg_flags, PG_BUSY); 551 UVM_PAGE_OWN(p, "scan_inactive"); 552 pmap_page_protect(p, PROT_READ); 553 uvmexp.pgswapout++; 554 555 /* 556 * for swap-backed pages we need to (re)allocate 557 * swap space. 
                        if (swap_backed) {
                                /* free old swap slot (if any) */
                                if (anon) {
                                        if (anon->an_swslot) {
                                                uvm_swap_free(anon->an_swslot,
                                                    1);
                                                anon->an_swslot = 0;
                                        }
                                } else {
                                        uao_dropswap(uobj,
                                            p->offset >> PAGE_SHIFT);
                                }

                                /* start new cluster (if necessary) */
                                if (swslot == 0) {
                                        swnpages = MAXBSIZE >> PAGE_SHIFT;
                                        swslot = uvm_swap_alloc(&swnpages,
                                            TRUE);
                                        if (swslot == 0) {
                                                /* no swap?  give up! */
                                                atomic_clearbits_int(
                                                    &p->pg_flags,
                                                    PG_BUSY);
                                                UVM_PAGE_OWN(p, NULL);
                                                continue;
                                        }
                                        swcpages = 0;   /* cluster is empty */
                                }

                                /* add block to cluster */
                                swpps[swcpages] = p;
                                if (anon)
                                        anon->an_swslot = swslot + swcpages;
                                else
                                        uao_set_swslot(uobj,
                                            p->offset >> PAGE_SHIFT,
                                            swslot + swcpages);
                                swcpages++;
                        }
                } else {
                        /* if p == NULL we must be doing a last swap i/o */
                        swap_backed = TRUE;
                }

                /*
                 * now consider doing the pageout.
                 *
                 * for swap-backed pages, we do the pageout if we have either
                 * filled the cluster (in which case swnpages == swcpages) or
                 * run out of pages (p == NULL).
                 *
                 * for object pages, we always do the pageout.
                 */
                if (swap_backed) {
                        if (p) {        /* if we just added a page to cluster */
                                /* cluster not full yet? */
                                if (swcpages < swnpages)
                                        continue;
                        }

                        /* starting I/O now... set up for it */
                        npages = swcpages;
                        ppsp = swpps;
                        /* for swap-backed pages only */
                        start = (vaddr_t) swslot;

                        /* if this is final pageout we could have a few
                         * extra swap blocks */
                        if (swcpages < swnpages) {
                                uvm_swap_free(swslot + swcpages,
                                    (swnpages - swcpages));
                        }
                } else {
                        /* normal object pageout */
                        ppsp = pps;
                        npages = sizeof(pps) / sizeof(struct vm_page *);
                        /* not looked at because PGO_ALLPAGES is set */
                        start = 0;
                }

                /*
                 * now do the pageout.
                 *
                 * for swap_backed pages we have already built the cluster.
                 * for !swap_backed pages, uvm_pager_put will call the object's
                 * "make put cluster" function to build a cluster on our behalf.
                 *
                 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
                 * it to free the cluster pages for us on a successful I/O (it
                 * always does this for un-successful I/O requests).  this
                 * allows us to do clustered pageout without having to deal
                 * with cluster pages at this level.
                 *
                 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
                 *  IN: locked: page queues
                 * OUT: locked:
                 *     !locked: pageqs
                 */

                uvmexp.pdpageouts++;
                result = uvm_pager_put(swap_backed ? NULL : uobj, p,
                    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);

                /*
                 * if we did i/o to swap, zero swslot to indicate that we are
                 * no longer building a swap-backed cluster.
                 */

                if (swap_backed)
                        swslot = 0;             /* done with this cluster */

                /*
                 * first, we check for VM_PAGER_PEND which means that the
                 * async I/O is in progress and the async I/O done routine
                 * will clean up after us.  in this case we move on to the
                 * next page.
                 *
                 * there is a very remote chance that the pending async i/o can
                 * finish _before_ we get here.  if that happens, our page "p"
                 * may no longer be on the inactive queue.  so we verify this
                 * when determining the next page (starting over at the head if
                 * we've lost our inactive page).
                 */
681 */ 682 683 if (result == VM_PAGER_PEND) { 684 uvmexp.paging += npages; 685 uvm_lock_pageq(); 686 uvmexp.pdpending++; 687 if (p) { 688 if (p->pg_flags & PQ_INACTIVE) 689 nextpg = TAILQ_NEXT(p, pageq); 690 else 691 nextpg = TAILQ_FIRST(pglst); 692 } else { 693 nextpg = NULL; 694 } 695 continue; 696 } 697 698 /* clean up "p" if we have one */ 699 if (p) { 700 /* 701 * the I/O request to "p" is done and uvm_pager_put 702 * has freed any cluster pages it may have allocated 703 * during I/O. all that is left for us to do is 704 * clean up page "p" (which is still PG_BUSY). 705 * 706 * our result could be one of the following: 707 * VM_PAGER_OK: successful pageout 708 * 709 * VM_PAGER_AGAIN: tmp resource shortage, we skip 710 * to next page 711 * VM_PAGER_{FAIL,ERROR,BAD}: an error. we 712 * "reactivate" page to get it out of the way (it 713 * will eventually drift back into the inactive 714 * queue for a retry). 715 * VM_PAGER_UNLOCK: should never see this as it is 716 * only valid for "get" operations 717 */ 718 719 /* relock p's object: page queues not lock yet, so 720 * no need for "try" */ 721 722 #ifdef DIAGNOSTIC 723 if (result == VM_PAGER_UNLOCK) 724 panic("pagedaemon: pageout returned " 725 "invalid 'unlock' code"); 726 #endif 727 728 /* handle PG_WANTED now */ 729 if (p->pg_flags & PG_WANTED) 730 wakeup(p); 731 732 atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED); 733 UVM_PAGE_OWN(p, NULL); 734 735 /* released during I/O? Can only happen for anons */ 736 if (p->pg_flags & PG_RELEASED) { 737 KASSERT(anon != NULL); 738 /* 739 * remove page so we can get nextpg, 740 * also zero out anon so we don't use 741 * it after the free. 742 */ 743 anon->an_page = NULL; 744 p->uanon = NULL; 745 746 uvm_anfree(anon); /* kills anon */ 747 pmap_page_protect(p, PROT_NONE); 748 anon = NULL; 749 uvm_lock_pageq(); 750 nextpg = TAILQ_NEXT(p, pageq); 751 /* free released page */ 752 uvm_pagefree(p); 753 } else { /* page was not released during I/O */ 754 uvm_lock_pageq(); 755 nextpg = TAILQ_NEXT(p, pageq); 756 if (result != VM_PAGER_OK) { 757 /* pageout was a failure... */ 758 if (result != VM_PAGER_AGAIN) 759 uvm_pageactivate(p); 760 pmap_clear_reference(p); 761 /* XXXCDC: if (swap_backed) FREE p's 762 * swap block? */ 763 } else { 764 /* pageout was a success... */ 765 pmap_clear_reference(p); 766 pmap_clear_modify(p); 767 atomic_setbits_int(&p->pg_flags, 768 PG_CLEAN); 769 } 770 } 771 772 /* 773 * drop object lock (if there is an object left). do 774 * a safety check of nextpg to make sure it is on the 775 * inactive queue (it should be since PG_BUSY pages on 776 * the inactive queue can't be re-queued [note: not 777 * true for active queue]). 778 */ 779 780 if (nextpg && (nextpg->pg_flags & PQ_INACTIVE) == 0) { 781 nextpg = TAILQ_FIRST(pglst); /* reload! */ 782 } 783 } else { 784 /* 785 * if p is null in this loop, make sure it stays null 786 * in the next loop. 787 */ 788 nextpg = NULL; 789 790 /* 791 * lock page queues here just so they're always locked 792 * at the end of the loop. 793 */ 794 uvm_lock_pageq(); 795 } 796 } 797 return (retval); 798 } 799 800 /* 801 * uvmpd_scan: scan the page queues and attempt to meet our targets. 

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
        int free, inactive_shortage, swap_shortage, pages_freed;
        struct vm_page *p, *nextpg;
        struct uvm_object *uobj;
        boolean_t got_it;

        uvmexp.pdrevs++;                /* counter */
        uobj = NULL;

        /*
         * get current "free" page count
         */
        free = uvmexp.free - BUFPAGES_DEFICIT;

#ifndef __SWAP_BROKEN
        /*
         * swap out some processes if we are below our free target.
         * we need to unlock the page queues for this.
         */
        if (free < uvmexp.freetarg) {
                uvmexp.pdswout++;
                uvm_unlock_pageq();
                uvm_swapout_threads();
                uvm_lock_pageq();
        }
#endif

        /*
         * now we want to work on meeting our targets.  first we work on our
         * free target by converting inactive pages into free pages.  then
         * we work on meeting our inactive target by converting active pages
         * to inactive ones.
         */

        /*
         * alternate starting queue between swap and object based on the
         * low bit of uvmexp.pdrevs (which we bump by one each call).
         */
        got_it = FALSE;
        pages_freed = uvmexp.pdfreed;   /* XXX - int */
        if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
                got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
        if (!got_it)
                got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
        if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
                (void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
        pages_freed = uvmexp.pdfreed - pages_freed;

        /*
         * we have done the scan to get free pages.  now we work on meeting
         * our inactive target.
         */
        inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;

        /*
         * detect if we're not going to be able to page anything out
         * until we free some swap resources from active pages.
         */
        swap_shortage = 0;
        if (uvmexp.free < uvmexp.freetarg &&
            uvmexp.swpginuse == uvmexp.swpages &&
            uvmexp.swpgonly < uvmexp.swpages &&
            pages_freed == 0) {
                swap_shortage = uvmexp.freetarg - uvmexp.free;
        }

        for (p = TAILQ_FIRST(&uvm.page_active);
            p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
            p = nextpg) {
                nextpg = TAILQ_NEXT(p, pageq);

                /* skip this page if it's busy. */
                if (p->pg_flags & PG_BUSY)
                        continue;

                if (p->pg_flags & PQ_ANON)
                        KASSERT(p->uanon != NULL);
                else
                        KASSERT(p->uobject != NULL);

                /*
                 * if there's a shortage of swap, free any swap allocated
                 * to this page so that other pages can be paged out.
                 */
                if (swap_shortage > 0) {
                        if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
                                uvm_swap_free(p->uanon->an_swslot, 1);
                                p->uanon->an_swslot = 0;
                                atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
                                swap_shortage--;
                        }
                        if (p->pg_flags & PQ_AOBJ) {
                                int slot = uao_set_swslot(p->uobject,
                                    p->offset >> PAGE_SHIFT, 0);
                                if (slot) {
                                        uvm_swap_free(slot, 1);
                                        atomic_clearbits_int(&p->pg_flags,
                                            PG_CLEAN);
                                        swap_shortage--;
                                }
                        }
                }

                /*
                 * deactivate this page if there's a shortage of
                 * inactive pages.
                 */
914 */ 915 if (inactive_shortage > 0) { 916 pmap_page_protect(p, PROT_NONE); 917 /* no need to check wire_count as pg is "active" */ 918 uvm_pagedeactivate(p); 919 uvmexp.pddeact++; 920 inactive_shortage--; 921 } 922 } 923 } 924 925 #ifdef HIBERNATE 926 927 /* 928 * uvmpd_drop: drop clean pages from list 929 */ 930 void 931 uvmpd_drop(struct pglist *pglst) 932 { 933 struct vm_page *p, *nextpg; 934 935 for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) { 936 nextpg = TAILQ_NEXT(p, pageq); 937 938 if (p->pg_flags & PQ_ANON || p->uobject == NULL) 939 continue; 940 941 if (p->pg_flags & PG_BUSY) 942 continue; 943 944 if (p->pg_flags & PG_CLEAN) { 945 /* 946 * we now have the page queues locked. 947 * the page is not busy. if the page is clean we 948 * can free it now and continue. 949 */ 950 if (p->pg_flags & PG_CLEAN) { 951 if (p->pg_flags & PQ_SWAPBACKED) { 952 /* this page now lives only in swap */ 953 uvmexp.swpgonly++; 954 } 955 956 /* zap all mappings with pmap_page_protect... */ 957 pmap_page_protect(p, PROT_NONE); 958 uvm_pagefree(p); 959 } 960 } 961 } 962 } 963 964 void 965 uvmpd_hibernate(void) 966 { 967 uvm_lock_pageq(); 968 969 uvmpd_drop(&uvm.page_inactive_swp); 970 uvmpd_drop(&uvm.page_inactive_obj); 971 uvmpd_drop(&uvm.page_active); 972 973 uvm_unlock_pageq(); 974 } 975 976 #endif 977