/*	$NetBSD: uvm_pdaemon.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_inactive(struct pglist *);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {

		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}

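/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a typical caller uses uvm_wait(), looping on an allocator and
 * sleeping until the pagedaemon frees memory.  "example_alloc", "uobj",
 * "off" and the wait message are hypothetical names; the block is
 * compiled out.
 */
#if 0
static struct vm_page *
example_alloc(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	simple_lock(&uobj->vmobjlock);
	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
		/* drop all locks before sleeping, per uvm_wait's contract */
		simple_unlock(&uobj->vmobjlock);
		uvm_wait("examplewait");
		simple_lock(&uobj->vmobjlock);
	}
	simple_unlock(&uobj->vmobjlock);
	return pg;	/* returned busy (PG_BUSY) */
}
#endif
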
/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

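/*
 * Worked example (illustrative, assuming 4 KB pages, i.e. PAGE_SHIFT 12):
 * on a 128 MB machine, npages = 32768, so npages / 20 = 1638 pages.  the
 * clamp then limits freemin to (256*1024) >> 12 = 64 pages (256 KB), and
 * freetarg becomes (64 * 4) / 3 = 85 pages.  npages / 20 only exceeds the
 * upper clamp once npages > 1280 (about 5 MB of RAM), so on any modern
 * machine the clamp, not the npages / 20 heuristic, sets freemin.
 */
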
/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		/*
		 * Estimate a hint for buf_drain().  Note that buffer memory
		 * (bufmem) is returned to the system only when an entire
		 * pool page is empty.
		 */

		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are
		 * holding)
		 */

		uvm_unlock_pageq();

		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);

		/*
		 * free any cached u-areas we don't need
		 */

		uvm_uarea_drain(TRUE);
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing
		 * "wakeups"!).  we need splbio because we want to make sure
		 * the aio_done list is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we stop scanning (after finishing any pending swap i/o) once we
 *    meet our free target
 */

static void
uvmpd_scan_inactive(struct pglist *pglst)
{
	int error;
	struct vm_page *p, *nextpg = NULL; /* Quell compiler warning */
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct vm_page *swpps[round_page(MAXPHYS) >> PAGE_SHIFT];
	struct simplelock *slock;
	int swnpages, swcpages;
	int swslot;
	int dirtyreacts, t, result;
	boolean_t anonunder, fileunder, execunder;
	boolean_t anonover, fileover, execover;
	boolean_t anonreact, filereact, execreact;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	dirtyreacts = 0;

	/*
	 * decide which types of pages we want to reactivate instead of
	 * freeing to keep usage within the minimum and maximum usage limits.
	 */

	t = uvmexp.active + uvmexp.inactive + uvmexp.free;
	anonunder = (uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8);
	fileunder = (uvmexp.filepages <= (t * uvmexp.filemin) >> 8);
	execunder = (uvmexp.execpages <= (t * uvmexp.execmin) >> 8);
	anonover = uvmexp.anonpages > ((t * uvmexp.anonmax) >> 8);
	fileover = uvmexp.filepages > ((t * uvmexp.filemax) >> 8);
	execover = uvmexp.execpages > ((t * uvmexp.execmax) >> 8);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = FALSE;
	}
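
	/*
	 * Illustrative note (not in the original): the *min and *max values
	 * are fixed-point fractions of total pageable memory with a
	 * denominator of 256, so "(t * uvmexp.anonmin) >> 8" is roughly
	 * t * anonmin/256.  e.g. with t = 100000 pages and anonmin = 26
	 * (about 10%), anonymous pages count as "under" their minimum
	 * while uvmexp.anonpages <= (100000 * 26) >> 8 = 10156 pages.
	 */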
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		uobj = NULL;
		anon = NULL;
		if (p) {

			/*
			 * see if we've met the free target.
			 */

			if (uvmexp.free + uvmexp.paging >=
			    uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
				nextpg = NULL;
			}
		}
		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page.
			 */

			if (pmap_clear_reference(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
			anon = p->uanon;
			uobj = p->uobject;

			/*
			 * enforce the minimum thresholds on different
			 * types of memory usage.  if reusing the current
			 * page would reduce that type of usage below its
			 * minimum, reactivate the page instead and move
			 * on to the next page.
			 */

			if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
				uvm_pageactivate(p);
				uvmexp.pdreexec++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
			    !UVM_OBJ_IS_VTEXT(uobj) && filereact) {
				uvm_pageactivate(p);
				uvmexp.pdrefile++;
				continue;
			}
			if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && anonreact) {
				uvm_pageactivate(p);
				uvmexp.pdreanon++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* does the page belong to an object? */
			if (uobj != NULL) {
				slock = &uobj->vmobjlock;
				if (!simple_lock_try(slock)) {
					continue;
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdobscan++;
			} else {
				KASSERT(anon != NULL);
				slock = &anon->an_lock;
				if (!simple_lock_try(slock)) {
					continue;
				}

				/*
				 * set PQ_ANON if it isn't set already.
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdanscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * if the page is not swap-backed, call the object's
			 * pager to flush and free the page.
			 */

			if ((p->pqflags & PQ_SWAPBACKED) == 0) {
				uvm_unlock_pageq();
				(void) (uobj->pgops->pgo_put)(uobj, p->offset,
				    p->offset + PAGE_SIZE,
				    PGO_CLEANIT|PGO_FREE);
				uvm_lock_pageq();
				if (nextpg &&
				    (nextpg->pqflags & PQ_INACTIVE) == 0) {
					nextpg = TAILQ_FIRST(pglst);
				}
				continue;
			}

			/*
			 * the page is swap-backed.  remove all the
			 * permissions from the page so we can sync the
			 * modified info without any race conditions.  if
			 * the page is clean we can free it now and continue.
			 */

			pmap_page_protect(p, VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
				p->flags &= ~(PG_CLEAN);
			}
			if (p->flags & PG_CLEAN) {
				int slot;
				int pageidx;

				pageidx = p->offset >> PAGE_SHIFT;
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				/*
				 * for anons, we need to remove the page
				 * from the anon ourselves.  for aobjs,
				 * pagefree did that for us.
				 */

				if (anon) {
					KASSERT(anon->an_swslot != 0);
					anon->an_page = NULL;
					slot = anon->an_swslot;
				} else {
					slot = uao_find_swslot(uobj, pageidx);
				}
				simple_unlock(slock);

				if (slot > 0) {
					/* this page is now only in swap. */
					simple_lock(&uvm.swap_data_lock);
					KASSERT(uvmexp.swpgonly <
					    uvmexp.swpginuse);
					uvmexp.swpgonly++;
					simple_unlock(&uvm.swap_data_lock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (uvmexp.free + uvmexp.paging >
			    uvmexp.freetarg << 2) {
				simple_unlock(slock);
				continue;
			}

			/*
			 * free any swap space allocated to the page since
			 * we'll have to write it again with its new data.
			 */

			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
			} else if (p->pqflags & PQ_AOBJ) {
				uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
			}

			/*
			 * if all pages in swap are only in swap,
			 * the swap space is full and we can't page out
			 * any more swap-backed pages.  reactivate this page
			 * so that we eventually cycle all pages through
			 * the inactive queue.
			 */

			if (uvm_swapisfull()) {
				dirtyreacts++;
				uvm_pageactivate(p);
				simple_unlock(slock);
				continue;
			}

			/*
			 * start new swap pageout cluster (if necessary).
			 */

			if (swslot == 0) {
				/*
				 * Even with strange MAXPHYS, the shift
				 * implicitly rounds down to a page.
				 */
				swnpages = MAXPHYS >> PAGE_SHIFT;
				swslot = uvm_swap_alloc(&swnpages, TRUE);
				if (swslot == 0) {
					simple_unlock(slock);
					continue;
				}
				swcpages = 0;
			}
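
			/*
			 * Illustrative note (not in the original): with the
			 * common MAXPHYS of 64 KB and 4 KB pages, swnpages
			 * starts out as 64*1024 >> 12 = 16, so a cluster
			 * gathers up to 16 dirty pages before uvm_swap_put()
			 * writes them out in a single i/o.  since we pass
			 * lessok = TRUE, uvm_swap_alloc() may shrink
			 * swnpages when fewer contiguous slots are free.
			 */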

			/*
			 * at this point, we're definitely going to reuse
			 * this page.  mark the page busy and delayed-free.
			 * we should remove the page from the page queues
			 * so we don't ever look at it again.
			 * adjust counters and such.
			 */

			p->flags |= PG_BUSY;
			UVM_PAGE_OWN(p, "scan_inactive");

			p->flags |= PG_PAGEOUT;
			uvmexp.paging++;
			uvm_pagedequeue(p);

			uvmexp.pgswapout++;

			/*
			 * add the new page to the cluster.
			 */

			if (anon) {
				anon->an_swslot = swslot + swcpages;
				simple_unlock(slock);
			} else {
				result = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, swslot + swcpages);
				if (result == -1) {
					p->flags &= ~(PG_BUSY|PG_PAGEOUT);
					UVM_PAGE_OWN(p, NULL);
					uvmexp.paging--;
					uvm_pageactivate(p);
					simple_unlock(slock);
					continue;
				}
				simple_unlock(slock);
			}
			swpps[swcpages] = p;
			swcpages++;

			/*
			 * if the cluster isn't full, look for more pages
			 * before starting the i/o.
			 */

			if (swcpages < swnpages) {
				continue;
			}
		}

		/*
		 * if this is the final pageout we could have a few
		 * unused swap blocks.  if so, free them now.
		 */

		if (swcpages < swnpages) {
			uvm_swap_free(swslot + swcpages,
			    (swnpages - swcpages));
		}

		/*
		 * now start the pageout.
		 */

		uvm_unlock_pageq();
		uvmexp.pdpageouts++;
		error = uvm_swap_put(swslot, swpps, swcpages, 0);
		KASSERT(error == 0);
		uvm_lock_pageq();

		/*
		 * zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		swslot = 0;

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
	}
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.  first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
	    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	    p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	    p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */

		if (p->uobject != NULL) {
			uobj = p->uobject;
			slock = &uobj->vmobjlock;
			if (!simple_lock_try(slock)) {
				continue;
			}
		} else {
			anon = p->uanon;
			KASSERT(anon != NULL);
			slock = &anon->an_lock;
			if (!simple_lock_try(slock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			simple_unlock(slock);
			continue;
		}

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		simple_unlock(slock);
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

boolean_t
uvm_reclaimable(void)
{
	int filepages;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return TRUE;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB (whichever
	 * is smaller), try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory,
	 * XXX ie. pools, traditional buffer cache.
	 */
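	/*
	 * Worked example (illustrative, assuming 4 KB pages): the 5MB cap
	 * is 5 * 1024 * 1024 >> 12 = 1280 pages, so on a machine with
	 * 200000 pageable pages the threshold below is
	 * MIN(200000 >> 4, 1280) = MIN(12500, 1280) = 1280 pages.
	 */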

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	if (filepages >= MIN((uvmexp.active + uvmexp.inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return TRUE;
	}

	/*
	 * kill the process, fail allocation, etc.
	 */

	return FALSE;
}