/*	$NetBSD: uvm_pager.c,v 1.25 2000/01/11 06:57:50 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE  (4 * 1024 * 1024)
vm_map_t pager_map;             /* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;     /* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
        int lcv;

        /*
         * init pager map
         */

        pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
            PAGER_MAP_SIZE, 0, FALSE, NULL);
        simple_lock_init(&pager_map_wanted_lock);
        pager_map_wanted = FALSE;

        /*
         * init ASYNC I/O queue
         */

        TAILQ_INIT(&uvm.aio_done);

        /*
         * call pager init functions
         */
        for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
            lcv++) {
                if (uvmpagerops[lcv]->pgo_init)
                        uvmpagerops[lcv]->pgo_init();
        }
}
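/*
 * Illustrative note (added example, not in the original tree): a new
 * pager hooks in by exporting its own uvm_pagerops and adding it to
 * uvmpagerops[] above; uvm_pager_init() then runs its pgo_init hook at
 * boot.  The name "uvm_exampleops" below is hypothetical:
 */
#if 0
extern struct uvm_pagerops uvm_exampleops;      /* hypothetical pager */

struct uvm_pagerops *uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
        &uvm_exampleops,        /* pgo_init called from uvm_pager_init() */
};
#endif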
/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
        struct vm_page **pps;
        int npages;
        struct uvm_aiodesc **aiop;      /* OUT */
        int waitf;
{
        vsize_t size;
        vaddr_t kva;
        struct uvm_aiodesc *aio;
        vaddr_t cva;
        struct vm_page *pp;
        UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
            pps, npages, aiop, waitf);

ReStart:
        if (aiop) {
                MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
                if (aio == NULL)
                        return(0);
                *aiop = aio;
        } else {
                aio = NULL;
        }

        size = npages << PAGE_SHIFT;
        kva = 0;                        /* let system choose VA */

        if (uvm_map(pager_map, &kva, size, NULL,
            UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
                if (waitf == M_NOWAIT) {
                        if (aio)
                                FREE(aio, M_TEMP);
                        UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
                        return(0);
                }
                simple_lock(&pager_map_wanted_lock);
                pager_map_wanted = TRUE;
                UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
                    "pager_map",0);
                goto ReStart;
        }

        /* got it */
        for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
                pp = *pps++;
#ifdef DEBUG
                if ((pp->flags & PG_BUSY) == 0)
                        panic("uvm_pagermapin: page not busy");
#endif

                /*
                 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
                 * XXX really necessary?  It could lead to unnecessary
                 * XXX instruction cache flushes.
                 */
                pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
                    VM_PROT_DEFAULT, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
        }

        UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the mapping by hand and then wake up anyone waiting
 * for pager_map space.
 */

void
uvm_pagermapout(kva, npages)
        vaddr_t kva;
        int npages;
{
        vsize_t size = npages << PAGE_SHIFT;
        vm_map_entry_t entries;
        UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

        /*
         * duplicate uvm_unmap, but add in pager_map_wanted handling.
         */

        vm_map_lock(pager_map);
        (void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
        simple_lock(&pager_map_wanted_lock);
        if (pager_map_wanted) {
                pager_map_wanted = FALSE;
                wakeup(pager_map);
        }
        simple_unlock(&pager_map_wanted_lock);
        vm_map_unlock(pager_map);
        if (entries)
                uvm_unmap_detach(entries, 0);

        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
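/*
 * Illustrative sketch (added example, not part of the pager API): a
 * synchronous pager I/O path would window its busy pages through
 * pager_map like this; the device transfer itself is left abstract:
 */
#if 0
static int
example_sync_putpages(pps, npages)
        struct vm_page **pps;
        int npages;
{
        vaddr_t kva;

        /* no aiodesc wanted (sync I/O), so sleep for KVA if needed */
        kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
        if (kva == 0)
                return(VM_PAGER_AGAIN);

        /* ... transfer [kva, kva + (npages << PAGE_SHIFT)) to device ... */

        uvm_pagermapout(kva, npages);   /* frees KVA, wakes waiters */
        return(VM_PAGER_OK);
}
#endif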
/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:   all pages in object are valid targets
 *      !PGO_ALLPAGES:  use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *      NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
        struct uvm_object *uobj;        /* IN */
        struct vm_page **pps, *center;  /* IN/OUT, IN */
        int *npages, flags;             /* IN/OUT, IN */
        vaddr_t mlo, mhi;               /* IN (if !PGO_ALLPAGES) */
{
        struct vm_page **ppsp, *pclust;
        vaddr_t lo, hi, curoff;
        int center_idx, forward;
        UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

        /*
         * center page should already be busy and write protected.  XXX:
         * suppose page is wired?  if we lock, then a process could
         * fault/block on it.  if we don't lock, a process could write the
         * pages in the middle of an I/O.  (consider an msync()).  let's
         * lock it for now (better to delay than corrupt data?).
         */

        /*
         * get cluster boundaries, check sanity, and apply our limits as well.
         */

        uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
        if ((flags & PGO_ALLPAGES) == 0) {
                if (lo < mlo)
                        lo = mlo;
                if (hi > mhi)
                        hi = mhi;
        }
        if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
                printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
                pps[0] = center;
                *npages = 1;
                return(pps);
        }

        /*
         * now determine the center and attempt to cluster around the
         * edges
         */

        center_idx = (center->offset - lo) >> PAGE_SHIFT;
        pps[center_idx] = center;       /* plug in the center page */
        ppsp = &pps[center_idx];
        *npages = 1;

        /*
         * attempt to cluster around the left [backward], and then
         * the right side [forward].
         *
         * note that for inactive pages (pages that have been deactivated)
         * there are no valid mappings and PG_CLEAN should be up to date.
         * [i.e. there is no need to query the pmap with pmap_is_modified
         * since there are no mappings].
         */

        for (forward = 0 ; forward <= 1 ; forward++) {

                curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
                for ( ;(forward == 0 && curoff >= lo) ||
                    (forward && curoff < hi);
                    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

                        pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
                        if (pclust == NULL)
                                break;          /* no page */
                        /* handle active pages */
                        /* NOTE: inactive pages don't have pmap mappings */
                        if ((pclust->pqflags & PQ_INACTIVE) == 0) {
                                if ((flags & PGO_DOACTCLUST) == 0)
                                        /* don't want mapped pages at all */
                                        break;

                                /* make sure "clean" bit is sync'd */
                                if ((pclust->flags & PG_CLEANCHK) == 0) {
                                        if ((pclust->flags & (PG_CLEAN|PG_BUSY))
                                            == PG_CLEAN &&
                                            pmap_is_modified(pclust))
                                                pclust->flags &= ~PG_CLEAN;

                                        /* now checked */
                                        pclust->flags |= PG_CLEANCHK;
                                }
                        }
                        /* is page available for cleaning, and does it need it? */
                        if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
                                break;  /* page is already clean or is busy */

                        /* yes!  enroll the page in our array */
                        pclust->flags |= PG_BUSY;               /* busy! */
                        UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
                        /* XXX: protect wired page?  see above comment. */
                        pmap_page_protect(pclust, VM_PROT_READ);
                        if (!forward) {
                                ppsp--;                 /* back up one page */
                                *ppsp = pclust;
                        } else {
                                /* move forward one page */
                                ppsp[*npages] = pclust;
                        }
                        *npages = *npages + 1;
                }
        }

        /*
         * done!  return the cluster array to the caller!!!
         */

        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
        return(ppsp);
}
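/*
 * Illustrative layout note (added commentary; offsets assume a
 * hypothetical 4k PAGE_SIZE): with lo = 0x0000, hi = 0x4000, and
 * center->offset = 0x2000, center_idx is 2 and the backward/forward
 * scans fill pps outward from the center:
 *
 *      pps:  [ 0x0000 ][ 0x1000 ][ 0x2000 ][ 0x3000 ]
 *      ppsp--^                    ^--center
 *
 * the returned pointer is ppsp (here &pps[0], after two backward
 * steps), not pps, and *npages counts only the pages actually
 * enrolled -- so callers must use the returned value, not their
 * original array base.
 */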
/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
        vm_map_entry_t entry;
        vm_prot_t prot;
{
        struct uvm_object *uobj = entry->object.uvm_obj;
        struct vm_page *pp;
        vaddr_t start, stop;
        UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

        if (UVM_ET_ISSUBMAP(entry))
                panic("uvm_shareprot: non-object attached");

        start = entry->offset;
        stop = start + (entry->end - entry->start);

        /*
         * traverse list of pages in object.  if page in range, pmap_prot it
         */

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
                if (pp->offset >= start && pp->offset < stop)
                        pmap_page_protect(pp, prot);
        }
        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}
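/*
 * Illustrative sketch (added example; "map" and "entry" are a
 * hypothetical map and one of its entries): downgrading all resident
 * pages of a shared mapping to read-only, observing the locking rules
 * stated above:
 */
#if 0
        vm_map_lock_read(map);          /* lock map entry's map */
        simple_lock(&entry->object.uvm_obj->vmobjlock);
        uvm_shareprot(entry, VM_PROT_READ);
        simple_unlock(&entry->object.uvm_obj->vmobjlock);
        vm_map_unlock_read(map);
#endif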
405 * 406 * => page queues must be locked by caller 407 * => if page is not swap-backed, then "uobj" points to the object 408 * backing it. this object should be locked by the caller. 409 * => if page is swap-backed, then "uobj" should be NULL. 410 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN 411 * for swap-backed memory, "pg" can be NULL if there is no page 412 * of interest [sometimes the case for the pagedaemon] 413 * => "ppsp_ptr" should point to an array of npages vm_page pointers 414 * for possible cluster building 415 * => flags (first two for non-swap-backed pages) 416 * PGO_ALLPAGES: all pages in uobj are valid targets 417 * PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets 418 * PGO_SYNCIO: do SYNC I/O (no async) 419 * PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O 420 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range 421 * if (!uobj) start is the (daddr_t) of the starting swapblk 422 * => return state: 423 * 1. we return the VM_PAGER status code of the pageout 424 * 2. we return with the page queues unlocked 425 * 3. if (uobj != NULL) [!swap_backed] we return with 426 * uobj locked _only_ if PGO_PDFREECLUST is set 427 * AND result != VM_PAGER_PEND. in all other cases 428 * we return with uobj unlocked. [this is a hack 429 * that allows the pagedaemon to save one lock/unlock 430 * pair in the !swap_backed case since we have to 431 * lock the uobj to drop the cluster anyway] 432 * 4. on errors we always drop the cluster. thus, if we return 433 * !PEND, !OK, then the caller only has to worry about 434 * un-busying the main page (not the cluster pages). 435 * 5. on success, if !PGO_PDFREECLUST, we return the cluster 436 * with all pages busy (caller must un-busy and check 437 * wanted/released flags). 438 */ 439 440 int 441 uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop) 442 struct uvm_object *uobj; /* IN */ 443 struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */ 444 int *npages; /* IN/OUT */ 445 int flags; /* IN */ 446 vaddr_t start, stop; /* IN, IN */ 447 { 448 int result; 449 daddr_t swblk; 450 struct vm_page **ppsp = *ppsp_ptr; 451 452 /* 453 * note that uobj is null if we are doing a swap-backed pageout. 454 * note that uobj is !null if we are doing normal object pageout. 455 * note that the page queues must be locked to cluster. 456 */ 457 458 if (uobj) { /* if !swap-backed */ 459 460 /* 461 * attempt to build a cluster for pageout using its 462 * make-put-cluster function (if it has one). 463 */ 464 465 if (uobj->pgops->pgo_mk_pcluster) { 466 ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp, 467 npages, pg, flags, start, stop); 468 *ppsp_ptr = ppsp; /* update caller's pointer */ 469 } else { 470 ppsp[0] = pg; 471 *npages = 1; 472 } 473 474 swblk = 0; /* XXX: keep gcc happy */ 475 476 } else { 477 478 /* 479 * for swap-backed pageout, the caller (the pagedaemon) has 480 * already built the cluster for us. the starting swap 481 * block we are writing to has been passed in as "start." 482 * "pg" could be NULL if there is no page we are especially 483 * interested in (in which case the whole cluster gets dropped 484 * in the event of an error or a sync "done"). 485 */ 486 swblk = (daddr_t) start; 487 /* ppsp and npages should be ok */ 488 } 489 490 /* now that we've clustered we can unlock the page queues */ 491 uvm_unlock_pageq(); 492 493 /* 494 * now attempt the I/O. if we have a failure and we are 495 * clustered, we will drop the cluster and try again. 
496 */ 497 498 ReTry: 499 if (uobj) { 500 /* object is locked */ 501 result = uobj->pgops->pgo_put(uobj, ppsp, *npages, 502 flags & PGO_SYNCIO); 503 /* object is now unlocked */ 504 } else { 505 /* nothing locked */ 506 result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO); 507 /* nothing locked */ 508 } 509 510 /* 511 * we have attempted the I/O. 512 * 513 * if the I/O was a success then: 514 * if !PGO_PDFREECLUST, we return the cluster to the 515 * caller (who must un-busy all pages) 516 * else we un-busy cluster pages for the pagedaemon 517 * 518 * if I/O is pending (async i/o) then we return the pending code. 519 * [in this case the async i/o done function must clean up when 520 * i/o is done...] 521 */ 522 523 if (result == VM_PAGER_PEND || result == VM_PAGER_OK) { 524 if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) { 525 /* 526 * drop cluster and relock object (only if I/O is 527 * not pending) 528 */ 529 if (uobj) 530 /* required for dropcluster */ 531 simple_lock(&uobj->vmobjlock); 532 if (*npages > 1 || pg == NULL) 533 uvm_pager_dropcluster(uobj, pg, ppsp, npages, 534 PGO_PDFREECLUST); 535 /* if (uobj): object still locked, as per 536 * return-state item #3 */ 537 } 538 return (result); 539 } 540 541 /* 542 * a pager error occured. 543 * for transient errors, drop to a cluster of 1 page ("pg") 544 * and try again. for hard errors, don't bother retrying. 545 */ 546 547 if (*npages > 1 || pg == NULL) { 548 if (uobj) { 549 simple_lock(&uobj->vmobjlock); 550 } 551 uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP); 552 553 /* 554 * for failed swap-backed pageouts with a "pg", 555 * we need to reset pg's swslot to either: 556 * "swblk" (for transient errors, so we can retry), 557 * or 0 (for hard errors). 558 */ 559 560 if (uobj == NULL && pg != NULL) { 561 int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0; 562 if (pg->pqflags & PQ_ANON) { 563 simple_lock(&pg->uanon->an_lock); 564 pg->uanon->an_swslot = nswblk; 565 simple_unlock(&pg->uanon->an_lock); 566 } else { 567 simple_lock(&pg->uobject->vmobjlock); 568 uao_set_swslot(pg->uobject, 569 pg->offset >> PAGE_SHIFT, 570 nswblk); 571 simple_unlock(&pg->uobject->vmobjlock); 572 } 573 } 574 if (result == VM_PAGER_AGAIN) { 575 576 /* 577 * for transient failures, free all the swslots that 578 * we're not going to retry with. 579 */ 580 581 if (uobj == NULL) { 582 if (pg) { 583 uvm_swap_free(swblk + 1, *npages - 1); 584 } else { 585 uvm_swap_free(swblk, *npages); 586 } 587 } 588 if (pg) { 589 ppsp[0] = pg; 590 *npages = 1; 591 goto ReTry; 592 } 593 } else if (uobj == NULL) { 594 595 /* 596 * for hard errors on swap-backed pageouts, 597 * mark the swslots as bad. note that we do not 598 * free swslots that we mark bad. 599 */ 600 601 uvm_swap_markbad(swblk, *npages); 602 } 603 } 604 605 /* 606 * a pager error occured (even after dropping the cluster, if there 607 * was one). give up! the caller only has one page ("pg") 608 * to worry about. 609 */ 610 611 if (uobj && (flags & PGO_PDFREECLUST) != 0) 612 simple_lock(&uobj->vmobjlock); 613 return(result); 614 } 615 616 /* 617 * uvm_pager_dropcluster: drop a cluster we have built (because we 618 * got an error, or, if PGO_PDFREECLUST we are un-busying the 619 * cluster pages on behalf of the pagedaemon). 620 * 621 * => uobj, if non-null, is a non-swap-backed object that is 622 * locked by the caller. we return with this object still 623 * locked. 
624 * => page queues are not locked 625 * => pg is our page of interest (the one we clustered around, can be null) 626 * => ppsp/npages is our current cluster 627 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster 628 * pages on behalf of the pagedaemon. 629 * PGO_REALLOCSWAP: drop previously allocated swap slots for 630 * clustered swap-backed pages (except for "pg" if !NULL) 631 * "swblk" is the start of swap alloc (e.g. for ppsp[0]) 632 * [only meaningful if swap-backed (uobj == NULL)] 633 */ 634 635 void 636 uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags) 637 struct uvm_object *uobj; /* IN */ 638 struct vm_page *pg, **ppsp; /* IN, IN/OUT */ 639 int *npages; /* IN/OUT */ 640 int flags; 641 { 642 int lcv; 643 boolean_t obj_is_alive; 644 struct uvm_object *saved_uobj; 645 646 /* 647 * drop all pages but "pg" 648 */ 649 650 for (lcv = 0 ; lcv < *npages ; lcv++) { 651 652 if (ppsp[lcv] == pg) /* skip "pg" */ 653 continue; 654 655 /* 656 * if swap-backed, gain lock on object that owns page. note 657 * that PQ_ANON bit can't change as long as we are holding 658 * the PG_BUSY bit (so there is no need to lock the page 659 * queues to test it). 660 * 661 * once we have the lock, dispose of the pointer to swap, if 662 * requested 663 */ 664 if (!uobj) { 665 if (ppsp[lcv]->pqflags & PQ_ANON) { 666 simple_lock(&ppsp[lcv]->uanon->an_lock); 667 if (flags & PGO_REALLOCSWAP) 668 /* zap swap block */ 669 ppsp[lcv]->uanon->an_swslot = 0; 670 } else { 671 simple_lock(&ppsp[lcv]->uobject->vmobjlock); 672 if (flags & PGO_REALLOCSWAP) 673 uao_set_swslot(ppsp[lcv]->uobject, 674 ppsp[lcv]->offset >> PAGE_SHIFT, 0); 675 } 676 } 677 678 /* did someone want the page while we had it busy-locked? */ 679 if (ppsp[lcv]->flags & PG_WANTED) 680 /* still holding obj lock */ 681 wakeup(ppsp[lcv]); 682 683 /* if page was released, release it. otherwise un-busy it */ 684 if (ppsp[lcv]->flags & PG_RELEASED) { 685 686 if (ppsp[lcv]->pqflags & PQ_ANON) { 687 /* so that anfree will free */ 688 ppsp[lcv]->flags &= ~(PG_BUSY); 689 UVM_PAGE_OWN(ppsp[lcv], NULL); 690 691 pmap_page_protect(ppsp[lcv], VM_PROT_NONE); 692 simple_unlock(&ppsp[lcv]->uanon->an_lock); 693 /* kills anon and frees pg */ 694 uvm_anfree(ppsp[lcv]->uanon); 695 696 continue; 697 } 698 699 /* 700 * pgo_releasepg will dump the page for us 701 */ 702 703 #ifdef DIAGNOSTIC 704 if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL) 705 panic("uvm_pager_dropcluster: no releasepg " 706 "function"); 707 #endif 708 saved_uobj = ppsp[lcv]->uobject; 709 obj_is_alive = 710 saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL); 711 712 #ifdef DIAGNOSTIC 713 /* for normal objects, "pg" is still PG_BUSY by us, 714 * so obj can't die */ 715 if (uobj && !obj_is_alive) 716 panic("uvm_pager_dropcluster: object died " 717 "with active page"); 718 #endif 719 /* only unlock the object if it is still alive... */ 720 if (obj_is_alive && saved_uobj != uobj) 721 simple_unlock(&saved_uobj->vmobjlock); 722 723 /* 724 * XXXCDC: suppose uobj died in the pgo_releasepg? 725 * how pass that 726 * info up to caller. we are currently ignoring it... 727 */ 728 729 continue; /* next page */ 730 731 } else { 732 ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED); 733 UVM_PAGE_OWN(ppsp[lcv], NULL); 734 } 735 736 /* 737 * if we are operating on behalf of the pagedaemon and we 738 * had a successful pageout update the page! 
739 */ 740 if (flags & PGO_PDFREECLUST) { 741 pmap_clear_reference(ppsp[lcv]); 742 pmap_clear_modify(ppsp[lcv]); 743 ppsp[lcv]->flags |= PG_CLEAN; 744 } 745 746 /* if anonymous cluster, unlock object and move on */ 747 if (!uobj) { 748 if (ppsp[lcv]->pqflags & PQ_ANON) 749 simple_unlock(&ppsp[lcv]->uanon->an_lock); 750 else 751 simple_unlock(&ppsp[lcv]->uobject->vmobjlock); 752 } 753 } 754 } 755