/*	$NetBSD: uvm_pager.c,v 1.30 2000/05/20 03:36:06 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, flags)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
	    pps, npages, aiop, flags);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
		    (flags & UVMPAGER_MAPIN_WAITOK));
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the mapping (waking
 * up anyone wanting space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
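
/*
 * Example (an illustrative sketch only, not part of the pager API proper):
 * a pager's put routine that needs a kernel mapping for device I/O would
 * typically bracket the transfer with the two functions above.  The page
 * array "pps", the page count "npages", and the error handling shown here
 * are hypothetical.
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, 0);
 *	if (kva == 0)
 *		return (VM_PAGER_AGAIN);    (no KVA free and we can't wait)
 *	... do the I/O on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 *
 * passing UVMPAGER_MAPIN_WAITOK instead would sleep until pager_map space
 * is available, and UVMPAGER_MAPIN_READ is needed when the device will
 * write into the pages (a pagein rather than a pageout).
 */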

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *	at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *	status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is page available for cleaning and does it need it */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
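
/*
 * To make the pointer arithmetic above concrete, here is a small worked
 * example with made-up numbers: suppose the caller passes *npages == 8 and
 * pgo_cluster() returns lo == center->offset - 3 * PAGE_SIZE, so that
 * center_idx == 3:
 *
 *	pps:  [ 0 ][ 1 ][ 2 ][ C ][ 4 ][ 5 ][ 6 ][ 7 ]
 *	                      ^ ppsp starts here
 *
 * the backward pass stores pages by first decrementing ppsp (filling slots
 * 2, 1, 0 if pages are found there), and the forward pass stores into
 * ppsp[*npages], the slot just past the pages gathered so far (filling
 * 4, 5, ...).  the returned ppsp therefore points at the first page of the
 * cluster and ppsp[0 .. *npages-1] are the busy pages the caller should
 * push out.
 */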

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.   note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
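
/*
 * Example (an illustrative sketch; "uobj", "pg", the array "pcluster" and
 * its size are hypothetical): a pagedaemon-style pageout of an object page
 * might look like the following.  the page queues and uobj are locked and
 * "pg" is PG_BUSY and !PG_CLEAN going in, per the notes above.
 *
 *	struct vm_page *pcluster[16];
 *	struct vm_page **ppsp = pcluster;
 *	int npages = 16;
 *	int result;
 *
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES | PGO_PDFREECLUST, 0, 0);
 *
 * on return the page queues are always unlocked (return-state item 2);
 * see items 3-5 above for what remains locked and busy depending on
 * "result" and the flags used.
 */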

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)	/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?
			 * we are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}
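
/*
 * Illustrative sketch (a hypothetical pager, not one defined in this file):
 * as noted above uvm_mk_pcluster(), a pager chooses its put-clustering
 * policy through the pgo_mk_pcluster hook in its uvm_pagerops, e.g.
 *
 *	example_pagerops.pgo_mk_pcluster = NULL;             (never cluster)
 *	example_pagerops.pgo_mk_pcluster = uvm_mk_pcluster;  (generic clusterer)
 *	example_pagerops.pgo_mk_pcluster = example_mkpcluster; (pager-specific)
 *
 * uvm_pager_put() then calls whatever hook is installed, or falls back to a
 * one-page "cluster" when the hook is NULL, before starting the pageout.
 */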