1 /* $OpenBSD: uvm_fault.c,v 1.102 2020/09/29 11:47:41 mpi Exp $ */ 2 /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp 29 */ 30 31 /* 32 * uvm_fault.c: fault handler 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/proc.h> 39 #include <sys/malloc.h> 40 #include <sys/mman.h> 41 #include <sys/tracepoint.h> 42 43 #include <uvm/uvm.h> 44 45 /* 46 * 47 * a word on page faults: 48 * 49 * types of page faults we handle: 50 * 51 * CASE 1: upper layer faults CASE 2: lower layer faults 52 * 53 * CASE 1A CASE 1B CASE 2A CASE 2B 54 * read/write1 write>1 read/write +-cow_write/zero 55 * | | | | 56 * +--|--+ +--|--+ +-----+ + | + | +-----+ 57 * amap | V | | ----------->new| | | | ^ | 58 * +-----+ +-----+ +-----+ + | + | +--|--+ 59 * | | | 60 * +-----+ +-----+ +--|--+ | +--|--+ 61 * uobj | d/c | | d/c | | V | +----| | 62 * +-----+ +-----+ +-----+ +-----+ 63 * 64 * d/c = don't care 65 * 66 * case [0]: layerless fault 67 * no amap or uobj is present. this is an error. 68 * 69 * case [1]: upper layer fault [anon active] 70 * 1A: [read] or [write with anon->an_ref == 1] 71 * I/O takes place in top level anon and uobj is not touched. 72 * 1B: [write with anon->an_ref > 1] 73 * new anon is alloc'd and data is copied off ["COW"] 74 * 75 * case [2]: lower layer fault [uobj] 76 * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area] 77 * I/O takes place directly in object. 78 * 2B: [write to copy_on_write] or [read on NULL uobj] 79 * data is "promoted" from uobj to a new anon. 80 * if uobj is null, then we zero fill. 
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *       that we could map in (and do map it if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *             fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 *   unlock everything, and do the I/O.  when I/O is done we must reverify
 *   the state of the world before assuming that our data structures are
 *   valid.  [because mappings could change while the map is unlocked]
 *
 * alternative 1: unbusy the page in question and restart the page fault
 *   from the top (ReFault).  this is easy but does not take advantage
 *   of the information that we already have from our previous lookup,
 *   although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *   a map.  [i.e. every time you write-lock a map (e.g. to change a
 *   mapping) you bump the version number up by one...]  so, we can save
 *   the version number of the map before we release the lock and start I/O.
 *   then when I/O is done we can relock and check the version numbers
 *   to see if anything changed.  this might save us some work over
 *   alternative 1 because we don't have to unbusy the page and it may
 *   involve fewer compares(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *   in place while I/O is in progress.  this could be complex to
 *   implement (especially with structures like amap that can be referenced
 *   by multiple map entries, and figuring out what should wait could be
 *   complex as well...).
 *
 * given that we are not currently multiprocessor or multithreaded we might
 * as well choose alternative 2 now.  maybe alternative 3 would be useful
 * in the future.  XXX keep in mind for future consideration.
 */

/*
 * local data structures
 */
struct uvm_advice {
	int nback;
	int nforw;
};

/*
 * page range array: set up in uvmfault_init().
153 */ 154 static struct uvm_advice uvmadvice[MADV_MASK + 1]; 155 156 #define UVM_MAXRANGE 16 /* must be max() of nback+nforw+1 */ 157 158 /* 159 * private prototypes 160 */ 161 static void uvmfault_amapcopy(struct uvm_faultinfo *); 162 static inline void uvmfault_anonflush(struct vm_anon **, int); 163 void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t); 164 void uvmfault_update_stats(struct uvm_faultinfo *); 165 166 /* 167 * inline functions 168 */ 169 /* 170 * uvmfault_anonflush: try and deactivate pages in specified anons 171 * 172 * => does not have to deactivate page if it is busy 173 */ 174 static inline void 175 uvmfault_anonflush(struct vm_anon **anons, int n) 176 { 177 int lcv; 178 struct vm_page *pg; 179 180 for (lcv = 0 ; lcv < n ; lcv++) { 181 if (anons[lcv] == NULL) 182 continue; 183 pg = anons[lcv]->an_page; 184 if (pg && (pg->pg_flags & PG_BUSY) == 0) { 185 uvm_lock_pageq(); 186 if (pg->wire_count == 0) { 187 pmap_page_protect(pg, PROT_NONE); 188 uvm_pagedeactivate(pg); 189 } 190 uvm_unlock_pageq(); 191 } 192 } 193 } 194 195 /* 196 * normal functions 197 */ 198 /* 199 * uvmfault_init: compute proper values for the uvmadvice[] array. 200 */ 201 void 202 uvmfault_init(void) 203 { 204 int npages; 205 206 npages = atop(16384); 207 if (npages > 0) { 208 KASSERT(npages <= UVM_MAXRANGE / 2); 209 uvmadvice[MADV_NORMAL].nforw = npages; 210 uvmadvice[MADV_NORMAL].nback = npages - 1; 211 } 212 213 npages = atop(32768); 214 if (npages > 0) { 215 KASSERT(npages <= UVM_MAXRANGE / 2); 216 uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1; 217 uvmadvice[MADV_SEQUENTIAL].nback = npages; 218 } 219 } 220 221 /* 222 * uvmfault_amapcopy: clear "needs_copy" in a map. 223 * 224 * => if we are out of RAM we sleep (waiting for more) 225 */ 226 static void 227 uvmfault_amapcopy(struct uvm_faultinfo *ufi) 228 { 229 230 /* while we haven't done the job */ 231 while (1) { 232 /* no mapping? give up. */ 233 if (uvmfault_lookup(ufi, TRUE) == FALSE) 234 return; 235 236 /* copy if needed. */ 237 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) 238 amap_copy(ufi->map, ufi->entry, M_NOWAIT, 239 UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE, 240 ufi->orig_rvaddr, ufi->orig_rvaddr + 1); 241 242 /* didn't work? must be out of RAM. sleep. */ 243 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) { 244 uvmfault_unlockmaps(ufi, TRUE); 245 uvm_wait("fltamapcopy"); 246 continue; 247 } 248 249 /* got it! */ 250 uvmfault_unlockmaps(ufi, TRUE); 251 return; 252 } 253 /*NOTREACHED*/ 254 } 255 256 /* 257 * uvmfault_anonget: get data in an anon into a non-busy, non-released 258 * page in that anon. 259 * 260 * => we don't move the page on the queues [gets moved later] 261 * => if we allocate a new page [we_own], it gets put on the queues. 262 * either way, the result is that the page is on the queues at return time 263 */ 264 int 265 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, 266 struct vm_anon *anon) 267 { 268 boolean_t we_own; /* we own anon's page? */ 269 boolean_t locked; /* did we relock? */ 270 struct vm_page *pg; 271 int result; 272 273 result = 0; /* XXX shut up gcc */ 274 uvmexp.fltanget++; 275 /* bump rusage counters */ 276 if (anon->an_page) 277 curproc->p_ru.ru_minflt++; 278 else 279 curproc->p_ru.ru_majflt++; 280 281 /* loop until we get it, or fail. */ 282 while (1) { 283 we_own = FALSE; /* TRUE if we set PG_BUSY on a page */ 284 pg = anon->an_page; 285 286 /* page there? make sure it is not busy/released. 
*/ 287 if (pg) { 288 KASSERT(pg->pg_flags & PQ_ANON); 289 KASSERT(pg->uanon == anon); 290 291 /* 292 * if the page is busy, we drop all the locks and 293 * try again. 294 */ 295 if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) 296 return (VM_PAGER_OK); 297 atomic_setbits_int(&pg->pg_flags, PG_WANTED); 298 uvmexp.fltpgwait++; 299 300 /* 301 * the last unlock must be an atomic unlock+wait on 302 * the owner of page 303 */ 304 uvmfault_unlockall(ufi, amap, NULL, NULL); 305 tsleep_nsec(pg, PVM, "anonget2", INFSLP); 306 /* ready to relock and try again */ 307 } else { 308 /* no page, we must try and bring it in. */ 309 pg = uvm_pagealloc(NULL, 0, anon, 0); 310 311 if (pg == NULL) { /* out of RAM. */ 312 uvmfault_unlockall(ufi, amap, NULL, anon); 313 uvmexp.fltnoram++; 314 uvm_wait("flt_noram1"); 315 /* ready to relock and try again */ 316 } else { 317 /* we set the PG_BUSY bit */ 318 we_own = TRUE; 319 uvmfault_unlockall(ufi, amap, NULL, anon); 320 321 /* 322 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN 323 * page into the uvm_swap_get function with 324 * all data structures unlocked. note that 325 * it is ok to read an_swslot here because 326 * we hold PG_BUSY on the page. 327 */ 328 uvmexp.pageins++; 329 result = uvm_swap_get(pg, anon->an_swslot, 330 PGO_SYNCIO); 331 332 /* 333 * we clean up after the i/o below in the 334 * "we_own" case 335 */ 336 /* ready to relock and try again */ 337 } 338 } 339 340 /* now relock and try again */ 341 locked = uvmfault_relock(ufi); 342 343 /* 344 * if we own the page (i.e. we set PG_BUSY), then we need 345 * to clean up after the I/O. there are three cases to 346 * consider: 347 * [1] page released during I/O: free anon and ReFault. 348 * [2] I/O not OK. free the page and cause the fault 349 * to fail. 350 * [3] I/O OK! activate the page and sync with the 351 * non-we_own case (i.e. drop anon lock if not locked). 352 */ 353 if (we_own) { 354 if (pg->pg_flags & PG_WANTED) { 355 wakeup(pg); 356 } 357 /* un-busy! */ 358 atomic_clearbits_int(&pg->pg_flags, 359 PG_WANTED|PG_BUSY|PG_FAKE); 360 UVM_PAGE_OWN(pg, NULL); 361 362 /* 363 * if we were RELEASED during I/O, then our anon is 364 * no longer part of an amap. we need to free the 365 * anon and try again. 366 */ 367 if (pg->pg_flags & PG_RELEASED) { 368 pmap_page_protect(pg, PROT_NONE); 369 uvm_anfree(anon); /* frees page for us */ 370 if (locked) 371 uvmfault_unlockall(ufi, amap, NULL, 372 NULL); 373 uvmexp.fltpgrele++; 374 return (VM_PAGER_REFAULT); /* refault! */ 375 } 376 377 if (result != VM_PAGER_OK) { 378 KASSERT(result != VM_PAGER_PEND); 379 380 /* remove page from anon */ 381 anon->an_page = NULL; 382 383 /* 384 * remove the swap slot from the anon 385 * and mark the anon as having no real slot. 386 * don't free the swap slot, thus preventing 387 * it from being used again. 388 */ 389 uvm_swap_markbad(anon->an_swslot, 1); 390 anon->an_swslot = SWSLOT_BAD; 391 392 /* 393 * note: page was never !PG_BUSY, so it 394 * can't be mapped and thus no need to 395 * pmap_page_protect it... 396 */ 397 uvm_lock_pageq(); 398 uvm_pagefree(pg); 399 uvm_unlock_pageq(); 400 401 if (locked) 402 uvmfault_unlockall(ufi, amap, NULL, 403 anon); 404 return (VM_PAGER_ERROR); 405 } 406 407 /* 408 * must be OK, clear modify (already PG_CLEAN) 409 * and activate 410 */ 411 pmap_clear_modify(pg); 412 uvm_lock_pageq(); 413 uvm_pageactivate(pg); 414 uvm_unlock_pageq(); 415 } 416 417 /* we were not able to relock. restart fault. 
*/ 418 if (!locked) 419 return (VM_PAGER_REFAULT); 420 421 /* verify no one touched the amap and moved the anon on us. */ 422 if (ufi != NULL && 423 amap_lookup(&ufi->entry->aref, 424 ufi->orig_rvaddr - ufi->entry->start) != anon) { 425 426 uvmfault_unlockall(ufi, amap, NULL, anon); 427 return (VM_PAGER_REFAULT); 428 } 429 430 /* try it again! */ 431 uvmexp.fltanretry++; 432 continue; 433 434 } /* while (1) */ 435 /*NOTREACHED*/ 436 } 437 438 /* 439 * Update statistics after fault resolution. 440 * - maxrss 441 */ 442 void 443 uvmfault_update_stats(struct uvm_faultinfo *ufi) 444 { 445 struct vm_map *map; 446 struct proc *p; 447 vsize_t res; 448 449 map = ufi->orig_map; 450 451 /* 452 * If this is a nested pmap (eg, a virtual machine pmap managed 453 * by vmm(4) on amd64/i386), don't do any updating, just return. 454 * 455 * pmap_nested() on other archs is #defined to 0, so this is a 456 * no-op. 457 */ 458 if (pmap_nested(map->pmap)) 459 return; 460 461 /* Update the maxrss for the process. */ 462 if (map->flags & VM_MAP_ISVMSPACE) { 463 p = curproc; 464 KASSERT(p != NULL && &p->p_vmspace->vm_map == map); 465 466 res = pmap_resident_count(map->pmap); 467 /* Convert res from pages to kilobytes. */ 468 res <<= (PAGE_SHIFT - 10); 469 470 if (p->p_ru.ru_maxrss < res) 471 p->p_ru.ru_maxrss = res; 472 } 473 } 474 475 /* 476 * F A U L T - m a i n e n t r y p o i n t 477 */ 478 479 /* 480 * uvm_fault: page fault handler 481 * 482 * => called from MD code to resolve a page fault 483 * => VM data structures usually should be unlocked. however, it is 484 * possible to call here with the main map locked if the caller 485 * gets a write lock, sets it recursive, and then calls us (c.f. 486 * uvm_map_pageable). this should be avoided because it keeps 487 * the map locked off during I/O. 488 */ 489 #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \ 490 ~PROT_WRITE : PROT_MASK) 491 int 492 uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type, 493 vm_prot_t access_type) 494 { 495 struct uvm_faultinfo ufi; 496 vm_prot_t enter_prot; 497 boolean_t wired, narrow, promote, locked, shadowed; 498 int npages, nback, nforw, centeridx, result, lcv, gotpages, ret; 499 vaddr_t startva, currva; 500 voff_t uoff; 501 paddr_t pa, pa_flags; 502 struct vm_amap *amap; 503 struct uvm_object *uobj; 504 struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon; 505 struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage; 506 507 anon = NULL; 508 pg = NULL; 509 510 uvmexp.faults++; /* XXX: locking? */ 511 TRACEPOINT(uvm, fault, vaddr, fault_type, access_type, NULL); 512 513 /* init the IN parameters in the ufi */ 514 ufi.orig_map = orig_map; 515 ufi.orig_rvaddr = trunc_page(vaddr); 516 ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */ 517 if (fault_type == VM_FAULT_WIRE) 518 narrow = TRUE; /* don't look for neighborhood 519 * pages on wire */ 520 else 521 narrow = FALSE; /* normal fault */ 522 523 /* "goto ReFault" means restart the page fault from ground zero. */ 524 ReFault: 525 /* lookup and lock the maps */ 526 if (uvmfault_lookup(&ufi, FALSE) == FALSE) { 527 return (EFAULT); 528 } 529 530 #ifdef DIAGNOSTIC 531 if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) 532 panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)", 533 ufi.map, vaddr); 534 #endif 535 536 /* check protection */ 537 if ((ufi.entry->protection & access_type) != access_type) { 538 uvmfault_unlockmaps(&ufi, FALSE); 539 return (EACCES); 540 } 541 542 /* 543 * "enter_prot" is the protection we want to enter the page in at. 
544 * for certain pages (e.g. copy-on-write pages) this protection can 545 * be more strict than ufi.entry->protection. "wired" means either 546 * the entry is wired or we are fault-wiring the pg. 547 */ 548 549 enter_prot = ufi.entry->protection; 550 pa_flags = UVM_ET_ISWC(ufi.entry) ? PMAP_WC : 0; 551 wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE); 552 if (wired) 553 access_type = enter_prot; /* full access for wired */ 554 555 /* handle "needs_copy" case. */ 556 if (UVM_ET_ISNEEDSCOPY(ufi.entry)) { 557 if ((access_type & PROT_WRITE) || 558 (ufi.entry->object.uvm_obj == NULL)) { 559 /* need to clear */ 560 uvmfault_unlockmaps(&ufi, FALSE); 561 uvmfault_amapcopy(&ufi); 562 uvmexp.fltamcopy++; 563 goto ReFault; 564 } else { 565 /* 566 * ensure that we pmap_enter page R/O since 567 * needs_copy is still true 568 */ 569 enter_prot &= ~PROT_WRITE; 570 } 571 } 572 573 /* identify the players */ 574 amap = ufi.entry->aref.ar_amap; /* top layer */ 575 uobj = ufi.entry->object.uvm_obj; /* bottom layer */ 576 577 /* 578 * check for a case 0 fault. if nothing backing the entry then 579 * error now. 580 */ 581 if (amap == NULL && uobj == NULL) { 582 uvmfault_unlockmaps(&ufi, FALSE); 583 return (EFAULT); 584 } 585 586 /* 587 * establish range of interest based on advice from mapper 588 * and then clip to fit map entry. note that we only want 589 * to do this the first time through the fault. if we 590 * ReFault we will disable this by setting "narrow" to true. 591 */ 592 if (narrow == FALSE) { 593 594 /* wide fault (!narrow) */ 595 nback = min(uvmadvice[ufi.entry->advice].nback, 596 (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT); 597 startva = ufi.orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT); 598 nforw = min(uvmadvice[ufi.entry->advice].nforw, 599 ((ufi.entry->end - ufi.orig_rvaddr) >> 600 PAGE_SHIFT) - 1); 601 /* 602 * note: "-1" because we don't want to count the 603 * faulting page as forw 604 */ 605 npages = nback + nforw + 1; 606 centeridx = nback; 607 608 narrow = TRUE; /* ensure only once per-fault */ 609 } else { 610 /* narrow fault! */ 611 nback = nforw = 0; 612 startva = ufi.orig_rvaddr; 613 npages = 1; 614 centeridx = 0; 615 } 616 617 /* if we've got an amap, extract current anons. */ 618 if (amap) { 619 anons = anons_store; 620 amap_lookups(&ufi.entry->aref, startva - ufi.entry->start, 621 anons, npages); 622 } else { 623 anons = NULL; /* to be safe */ 624 } 625 626 /* 627 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages 628 * now and then forget about them (for the rest of the fault). 629 */ 630 if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) { 631 /* flush back-page anons? */ 632 if (amap) 633 uvmfault_anonflush(anons, nback); 634 635 /* flush object? */ 636 if (uobj) { 637 uoff = (startva - ufi.entry->start) + ufi.entry->offset; 638 (void) uobj->pgops->pgo_flush(uobj, uoff, uoff + 639 ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE); 640 } 641 642 /* now forget about the backpages */ 643 if (amap) 644 anons += nback; 645 startva += ((vsize_t)nback << PAGE_SHIFT); 646 npages -= nback; 647 centeridx = 0; 648 } 649 650 /* 651 * map in the backpages and frontpages we found in the amap in hopes 652 * of preventing future faults. we also init the pages[] array as 653 * we go. 
654 */ 655 currva = startva; 656 shadowed = FALSE; 657 for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) { 658 /* 659 * dont play with VAs that are already mapped 660 * except for center) 661 */ 662 if (lcv != centeridx && 663 pmap_extract(ufi.orig_map->pmap, currva, &pa)) { 664 pages[lcv] = PGO_DONTCARE; 665 continue; 666 } 667 668 /* unmapped or center page. check if any anon at this level. */ 669 if (amap == NULL || anons[lcv] == NULL) { 670 pages[lcv] = NULL; 671 continue; 672 } 673 674 /* check for present page and map if possible. re-activate it. */ 675 pages[lcv] = PGO_DONTCARE; 676 if (lcv == centeridx) { /* save center for later! */ 677 shadowed = TRUE; 678 continue; 679 } 680 anon = anons[lcv]; 681 if (anon->an_page && 682 (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) { 683 uvm_lock_pageq(); 684 uvm_pageactivate(anon->an_page); /* reactivate */ 685 uvm_unlock_pageq(); 686 uvmexp.fltnamap++; 687 688 /* 689 * Since this isn't the page that's actually faulting, 690 * ignore pmap_enter() failures; it's not critical 691 * that we enter these right now. 692 */ 693 (void) pmap_enter(ufi.orig_map->pmap, currva, 694 VM_PAGE_TO_PHYS(anon->an_page) | pa_flags, 695 (anon->an_ref > 1) ? (enter_prot & ~PROT_WRITE) : 696 enter_prot, 697 PMAP_CANFAIL | 698 (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0)); 699 } 700 } 701 if (npages > 1) 702 pmap_update(ufi.orig_map->pmap); 703 704 /* (shadowed == TRUE) if there is an anon at the faulting address */ 705 /* 706 * if the desired page is not shadowed by the amap and we have a 707 * backing object, then we check to see if the backing object would 708 * prefer to handle the fault itself (rather than letting us do it 709 * with the usual pgo_get hook). the backing object signals this by 710 * providing a pgo_fault routine. 711 */ 712 if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) { 713 result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages, 714 centeridx, fault_type, access_type, 715 PGO_LOCKED); 716 717 if (result == VM_PAGER_OK) 718 return (0); /* pgo_fault did pmap enter */ 719 else if (result == VM_PAGER_REFAULT) 720 goto ReFault; /* try again! */ 721 else 722 return (EACCES); 723 } 724 725 /* 726 * now, if the desired page is not shadowed by the amap and we have 727 * a backing object that does not have a special fault routine, then 728 * we ask (with pgo_get) the object for resident pages that we care 729 * about and attempt to map them in. we do not let pgo_get block 730 * (PGO_LOCKED). 731 * 732 * ("get" has the option of doing a pmap_enter for us) 733 */ 734 if (uobj && shadowed == FALSE) { 735 uvmexp.fltlget++; 736 gotpages = npages; 737 (void) uobj->pgops->pgo_get(uobj, ufi.entry->offset + 738 (startva - ufi.entry->start), 739 pages, &gotpages, centeridx, 740 access_type & MASK(ufi.entry), 741 ufi.entry->advice, PGO_LOCKED); 742 743 /* check for pages to map, if we got any */ 744 uobjpage = NULL; 745 if (gotpages) { 746 currva = startva; 747 for (lcv = 0 ; lcv < npages ; 748 lcv++, currva += PAGE_SIZE) { 749 if (pages[lcv] == NULL || 750 pages[lcv] == PGO_DONTCARE) 751 continue; 752 753 KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0); 754 755 /* 756 * if center page is resident and not 757 * PG_BUSY, then pgo_get made it PG_BUSY 758 * for us and gave us a handle to it. 759 * remember this page as "uobjpage." 760 * (for later use). 
761 */ 762 if (lcv == centeridx) { 763 uobjpage = pages[lcv]; 764 continue; 765 } 766 767 /* 768 * note: calling pgo_get with locked data 769 * structures returns us pages which are 770 * neither busy nor released, so we don't 771 * need to check for this. we can just 772 * directly enter the page (after moving it 773 * to the head of the active queue [useful?]). 774 */ 775 776 uvm_lock_pageq(); 777 uvm_pageactivate(pages[lcv]); /* reactivate */ 778 uvm_unlock_pageq(); 779 uvmexp.fltnomap++; 780 781 /* 782 * Since this page isn't the page that's 783 * actually faulting, ignore pmap_enter() 784 * failures; it's not critical that we 785 * enter these right now. 786 */ 787 (void) pmap_enter(ufi.orig_map->pmap, currva, 788 VM_PAGE_TO_PHYS(pages[lcv]) | pa_flags, 789 enter_prot & MASK(ufi.entry), 790 PMAP_CANFAIL | 791 (wired ? PMAP_WIRED : 0)); 792 793 /* 794 * NOTE: page can't be PG_WANTED because 795 * we've held the lock the whole time 796 * we've had the handle. 797 */ 798 atomic_clearbits_int(&pages[lcv]->pg_flags, 799 PG_BUSY); 800 UVM_PAGE_OWN(pages[lcv], NULL); 801 } /* for "lcv" loop */ 802 pmap_update(ufi.orig_map->pmap); 803 } /* "gotpages" != 0 */ 804 /* note: object still _locked_ */ 805 } else { 806 uobjpage = NULL; 807 } 808 809 /* 810 * note that at this point we are done with any front or back pages. 811 * we are now going to focus on the center page (i.e. the one we've 812 * faulted on). if we have faulted on the top (anon) layer 813 * [i.e. case 1], then the anon we want is anons[centeridx] (we have 814 * not touched it yet). if we have faulted on the bottom (uobj) 815 * layer [i.e. case 2] and the page was both present and available, 816 * then we've got a pointer to it as "uobjpage" and we've already 817 * made it BUSY. 818 */ 819 /* 820 * there are four possible cases we must address: 1A, 1B, 2A, and 2B 821 */ 822 /* redirect case 2: if we are not shadowed, go to case 2. */ 823 if (shadowed == FALSE) 824 goto Case2; 825 826 /* handle case 1: fault on an anon in our amap */ 827 anon = anons[centeridx]; 828 829 /* 830 * no matter if we have case 1A or case 1B we are going to need to 831 * have the anon's memory resident. ensure that now. 832 */ 833 /* 834 * let uvmfault_anonget do the dirty work. 835 * also, if it is OK, then the anon's page is on the queues. 836 */ 837 result = uvmfault_anonget(&ufi, amap, anon); 838 switch (result) { 839 case VM_PAGER_OK: 840 break; 841 842 case VM_PAGER_REFAULT: 843 goto ReFault; 844 845 case VM_PAGER_ERROR: 846 /* 847 * An error occured while trying to bring in the 848 * page -- this is the only error we return right 849 * now. 850 */ 851 return (EACCES); /* XXX */ 852 default: 853 #ifdef DIAGNOSTIC 854 panic("uvm_fault: uvmfault_anonget -> %d", result); 855 #else 856 return (EACCES); 857 #endif 858 } 859 860 /* 861 * if we are case 1B then we will need to allocate a new blank 862 * anon to transfer the data into. note that we have a lock 863 * on anon, so no one can busy or release the page until we are done. 864 * also note that the ref count can't drop to zero here because 865 * it is > 1 and we are only dropping one ref. 866 * 867 * in the (hopefully very rare) case that we are out of RAM we 868 * will wait for more RAM, and refault. 869 * 870 * if we are out of anon VM we wait for RAM to become available. 
871 */ 872 873 if ((access_type & PROT_WRITE) != 0 && anon->an_ref > 1) { 874 uvmexp.flt_acow++; 875 oanon = anon; /* oanon = old */ 876 anon = uvm_analloc(); 877 if (anon) { 878 pg = uvm_pagealloc(NULL, 0, anon, 0); 879 } 880 881 /* check for out of RAM */ 882 if (anon == NULL || pg == NULL) { 883 uvmfault_unlockall(&ufi, amap, NULL, oanon); 884 if (anon == NULL) 885 uvmexp.fltnoanon++; 886 else { 887 uvm_anfree(anon); 888 uvmexp.fltnoram++; 889 } 890 891 if (uvm_swapisfull()) 892 return (ENOMEM); 893 894 /* out of RAM, wait for more */ 895 if (anon == NULL) 896 uvm_anwait(); 897 else 898 uvm_wait("flt_noram3"); 899 goto ReFault; 900 } 901 902 /* got all resources, replace anon with nanon */ 903 uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */ 904 /* un-busy! new page */ 905 atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE); 906 UVM_PAGE_OWN(pg, NULL); 907 ret = amap_add(&ufi.entry->aref, 908 ufi.orig_rvaddr - ufi.entry->start, anon, 1); 909 KASSERT(ret == 0); 910 911 /* deref: can not drop to zero here by defn! */ 912 oanon->an_ref--; 913 914 /* 915 * note: anon is _not_ locked, but we have the sole references 916 * to in from amap. 917 * thus, no one can get at it until we are done with it. 918 */ 919 } else { 920 uvmexp.flt_anon++; 921 oanon = anon; 922 pg = anon->an_page; 923 if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */ 924 enter_prot = enter_prot & ~PROT_WRITE; 925 } 926 927 /* 928 * now map the page in ... 929 * XXX: old fault unlocks object before pmap_enter. this seems 930 * suspect since some other thread could blast the page out from 931 * under us between the unlock and the pmap_enter. 932 */ 933 if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, 934 VM_PAGE_TO_PHYS(pg) | pa_flags, enter_prot, 935 access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) { 936 /* 937 * No need to undo what we did; we can simply think of 938 * this as the pmap throwing away the mapping information. 939 * 940 * We do, however, have to go through the ReFault path, 941 * as the map may change while we're asleep. 942 */ 943 uvmfault_unlockall(&ufi, amap, NULL, oanon); 944 if (uvm_swapisfull()) { 945 /* XXX instrumentation */ 946 return (ENOMEM); 947 } 948 /* XXX instrumentation */ 949 uvm_wait("flt_pmfail1"); 950 goto ReFault; 951 } 952 953 /* ... update the page queues. */ 954 uvm_lock_pageq(); 955 956 if (fault_type == VM_FAULT_WIRE) { 957 uvm_pagewire(pg); 958 /* 959 * since the now-wired page cannot be paged out, 960 * release its swap resources for others to use. 961 * since an anon with no swap cannot be PG_CLEAN, 962 * clear its clean flag now. 963 */ 964 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); 965 uvm_anon_dropswap(anon); 966 } else { 967 /* activate it */ 968 uvm_pageactivate(pg); 969 } 970 971 uvm_unlock_pageq(); 972 973 /* done case 1! finish up by unlocking everything and returning success */ 974 uvmfault_unlockall(&ufi, amap, NULL, oanon); 975 pmap_update(ufi.orig_map->pmap); 976 return (0); 977 978 979 Case2: 980 /* handle case 2: faulting on backing object or zero fill */ 981 /* 982 * note that uobjpage can not be PGO_DONTCARE at this point. we now 983 * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we 984 * have a backing object, check and see if we are going to promote 985 * the data up to an anon during the fault. 
986 */ 987 if (uobj == NULL) { 988 uobjpage = PGO_DONTCARE; 989 promote = TRUE; /* always need anon here */ 990 } else { 991 KASSERT(uobjpage != PGO_DONTCARE); 992 promote = (access_type & PROT_WRITE) && 993 UVM_ET_ISCOPYONWRITE(ufi.entry); 994 } 995 996 /* 997 * if uobjpage is not null then we do not need to do I/O to get the 998 * uobjpage. 999 * 1000 * if uobjpage is null, then we need to ask the pager to 1001 * get the data for us. once we have the data, we need to reverify 1002 * the state the world. we are currently not holding any resources. 1003 */ 1004 if (uobjpage) { 1005 /* update rusage counters */ 1006 curproc->p_ru.ru_minflt++; 1007 } else { 1008 /* update rusage counters */ 1009 curproc->p_ru.ru_majflt++; 1010 1011 uvmfault_unlockall(&ufi, amap, NULL, NULL); 1012 1013 uvmexp.fltget++; 1014 gotpages = 1; 1015 uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset; 1016 result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages, 1017 0, access_type & MASK(ufi.entry), ufi.entry->advice, 1018 PGO_SYNCIO); 1019 1020 /* recover from I/O */ 1021 if (result != VM_PAGER_OK) { 1022 KASSERT(result != VM_PAGER_PEND); 1023 1024 if (result == VM_PAGER_AGAIN) { 1025 tsleep_nsec(&lbolt, PVM, "fltagain2", INFSLP); 1026 goto ReFault; 1027 } 1028 1029 if (!UVM_ET_ISNOFAULT(ufi.entry)) 1030 return (EIO); 1031 1032 uobjpage = PGO_DONTCARE; 1033 promote = TRUE; 1034 } 1035 1036 /* re-verify the state of the world. */ 1037 locked = uvmfault_relock(&ufi); 1038 1039 /* 1040 * Re-verify that amap slot is still free. if there is 1041 * a problem, we clean up. 1042 */ 1043 if (locked && amap && amap_lookup(&ufi.entry->aref, 1044 ufi.orig_rvaddr - ufi.entry->start)) { 1045 if (locked) 1046 uvmfault_unlockall(&ufi, amap, NULL, NULL); 1047 locked = FALSE; 1048 } 1049 1050 /* didn't get the lock? release the page and retry. */ 1051 if (locked == FALSE && uobjpage != PGO_DONTCARE) { 1052 uvm_lock_pageq(); 1053 /* make sure it is in queues */ 1054 uvm_pageactivate(uobjpage); 1055 uvm_unlock_pageq(); 1056 1057 if (uobjpage->pg_flags & PG_WANTED) 1058 /* still holding object lock */ 1059 wakeup(uobjpage); 1060 atomic_clearbits_int(&uobjpage->pg_flags, 1061 PG_BUSY|PG_WANTED); 1062 UVM_PAGE_OWN(uobjpage, NULL); 1063 goto ReFault; 1064 } 1065 if (locked == FALSE) 1066 goto ReFault; 1067 1068 /* 1069 * we have the data in uobjpage which is PG_BUSY 1070 */ 1071 } 1072 1073 /* 1074 * notes: 1075 * - at this point uobjpage can not be NULL 1076 * - at this point uobjpage could be PG_WANTED (handle later) 1077 */ 1078 if (promote == FALSE) { 1079 /* 1080 * we are not promoting. if the mapping is COW ensure that we 1081 * don't give more access than we should (e.g. when doing a read 1082 * fault on a COPYONWRITE mapping we want to map the COW page in 1083 * R/O even though the entry protection could be R/W). 1084 * 1085 * set "pg" to the page we want to map in (uobjpage, usually) 1086 */ 1087 uvmexp.flt_obj++; 1088 if (UVM_ET_ISCOPYONWRITE(ufi.entry)) 1089 enter_prot &= ~PROT_WRITE; 1090 pg = uobjpage; /* map in the actual object */ 1091 1092 /* assert(uobjpage != PGO_DONTCARE) */ 1093 1094 /* 1095 * we are faulting directly on the page. 1096 */ 1097 } else { 1098 /* 1099 * if we are going to promote the data to an anon we 1100 * allocate a blank anon here and plug it into our amap. 1101 */ 1102 #ifdef DIAGNOSTIC 1103 if (amap == NULL) 1104 panic("uvm_fault: want to promote data, but no anon"); 1105 #endif 1106 1107 anon = uvm_analloc(); 1108 if (anon) { 1109 /* 1110 * In `Fill in data...' 
below, if 1111 * uobjpage == PGO_DONTCARE, we want 1112 * a zero'd, dirty page, so have 1113 * uvm_pagealloc() do that for us. 1114 */ 1115 pg = uvm_pagealloc(NULL, 0, anon, 1116 (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0); 1117 } 1118 1119 /* 1120 * out of memory resources? 1121 */ 1122 if (anon == NULL || pg == NULL) { 1123 /* arg! must unbusy our page and fail or sleep. */ 1124 if (uobjpage != PGO_DONTCARE) { 1125 uvm_lock_pageq(); 1126 uvm_pageactivate(uobjpage); 1127 uvm_unlock_pageq(); 1128 1129 if (uobjpage->pg_flags & PG_WANTED) 1130 wakeup(uobjpage); 1131 atomic_clearbits_int(&uobjpage->pg_flags, 1132 PG_BUSY|PG_WANTED); 1133 UVM_PAGE_OWN(uobjpage, NULL); 1134 } 1135 1136 /* unlock and fail ... */ 1137 uvmfault_unlockall(&ufi, amap, uobj, NULL); 1138 if (anon == NULL) 1139 uvmexp.fltnoanon++; 1140 else { 1141 uvm_anfree(anon); 1142 uvmexp.fltnoram++; 1143 } 1144 1145 if (uvm_swapisfull()) 1146 return (ENOMEM); 1147 1148 /* out of RAM, wait for more */ 1149 if (anon == NULL) 1150 uvm_anwait(); 1151 else 1152 uvm_wait("flt_noram5"); 1153 goto ReFault; 1154 } 1155 1156 /* fill in the data */ 1157 if (uobjpage != PGO_DONTCARE) { 1158 uvmexp.flt_prcopy++; 1159 /* copy page [pg now dirty] */ 1160 uvm_pagecopy(uobjpage, pg); 1161 1162 /* 1163 * promote to shared amap? make sure all sharing 1164 * procs see it 1165 */ 1166 if ((amap_flags(amap) & AMAP_SHARED) != 0) { 1167 pmap_page_protect(uobjpage, PROT_NONE); 1168 } 1169 1170 /* dispose of uobjpage. drop handle to uobj as well. */ 1171 if (uobjpage->pg_flags & PG_WANTED) 1172 wakeup(uobjpage); 1173 atomic_clearbits_int(&uobjpage->pg_flags, 1174 PG_BUSY|PG_WANTED); 1175 UVM_PAGE_OWN(uobjpage, NULL); 1176 uvm_lock_pageq(); 1177 uvm_pageactivate(uobjpage); 1178 uvm_unlock_pageq(); 1179 uobj = NULL; 1180 } else { 1181 uvmexp.flt_przero++; 1182 /* 1183 * Page is zero'd and marked dirty by uvm_pagealloc() 1184 * above. 1185 */ 1186 } 1187 1188 if (amap_add(&ufi.entry->aref, 1189 ufi.orig_rvaddr - ufi.entry->start, anon, 0)) { 1190 uvmfault_unlockall(&ufi, amap, NULL, oanon); 1191 uvm_anfree(anon); 1192 uvmexp.fltnoamap++; 1193 1194 if (uvm_swapisfull()) 1195 return (ENOMEM); 1196 1197 amap_populate(&ufi.entry->aref, 1198 ufi.orig_rvaddr - ufi.entry->start); 1199 goto ReFault; 1200 } 1201 } 1202 1203 /* note: pg is either the uobjpage or the new page in the new anon */ 1204 /* 1205 * all resources are present. we can now map it in and free our 1206 * resources. 1207 */ 1208 if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, 1209 VM_PAGE_TO_PHYS(pg) | pa_flags, enter_prot, 1210 access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) { 1211 /* 1212 * No need to undo what we did; we can simply think of 1213 * this as the pmap throwing away the mapping information. 1214 * 1215 * We do, however, have to go through the ReFault path, 1216 * as the map may change while we're asleep. 1217 */ 1218 if (pg->pg_flags & PG_WANTED) 1219 wakeup(pg); 1220 1221 atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED); 1222 UVM_PAGE_OWN(pg, NULL); 1223 uvmfault_unlockall(&ufi, amap, uobj, NULL); 1224 if (uvm_swapisfull()) { 1225 /* XXX instrumentation */ 1226 return (ENOMEM); 1227 } 1228 /* XXX instrumentation */ 1229 uvm_wait("flt_pmfail2"); 1230 goto ReFault; 1231 } 1232 1233 uvm_lock_pageq(); 1234 1235 if (fault_type == VM_FAULT_WIRE) { 1236 uvm_pagewire(pg); 1237 if (pg->pg_flags & PQ_AOBJ) { 1238 /* 1239 * since the now-wired page cannot be paged out, 1240 * release its swap resources for others to use. 
1241 * since an aobj page with no swap cannot be PG_CLEAN, 1242 * clear its clean flag now. 1243 */ 1244 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); 1245 uao_dropswap(uobj, pg->offset >> PAGE_SHIFT); 1246 } 1247 } else { 1248 /* activate it */ 1249 uvm_pageactivate(pg); 1250 } 1251 uvm_unlock_pageq(); 1252 1253 if (pg->pg_flags & PG_WANTED) 1254 wakeup(pg); 1255 1256 atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED); 1257 UVM_PAGE_OWN(pg, NULL); 1258 uvmfault_unlockall(&ufi, amap, uobj, NULL); 1259 pmap_update(ufi.orig_map->pmap); 1260 1261 return (0); 1262 } 1263 1264 1265 /* 1266 * uvm_fault_wire: wire down a range of virtual addresses in a map. 1267 * 1268 * => map may be read-locked by caller, but MUST NOT be write-locked. 1269 * => if map is read-locked, any operations which may cause map to 1270 * be write-locked in uvm_fault() must be taken care of by 1271 * the caller. See uvm_map_pageable(). 1272 */ 1273 int 1274 uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type) 1275 { 1276 vaddr_t va; 1277 int rv; 1278 1279 /* 1280 * now fault it in a page at a time. if the fault fails then we have 1281 * to undo what we have done. note that in uvm_fault PROT_NONE 1282 * is replaced with the max protection if fault_type is VM_FAULT_WIRE. 1283 */ 1284 for (va = start ; va < end ; va += PAGE_SIZE) { 1285 rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type); 1286 if (rv) { 1287 if (va != start) { 1288 uvm_fault_unwire(map, start, va); 1289 } 1290 return (rv); 1291 } 1292 } 1293 1294 return (0); 1295 } 1296 1297 /* 1298 * uvm_fault_unwire(): unwire range of virtual space. 1299 */ 1300 void 1301 uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end) 1302 { 1303 1304 vm_map_lock_read(map); 1305 uvm_fault_unwire_locked(map, start, end); 1306 vm_map_unlock_read(map); 1307 } 1308 1309 /* 1310 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire(). 1311 * 1312 * => map must be at least read-locked. 1313 */ 1314 void 1315 uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end) 1316 { 1317 vm_map_entry_t entry, next; 1318 pmap_t pmap = vm_map_pmap(map); 1319 vaddr_t va; 1320 paddr_t pa; 1321 struct vm_page *pg; 1322 1323 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); 1324 1325 /* 1326 * we assume that the area we are unwiring has actually been wired 1327 * in the first place. this means that we should be able to extract 1328 * the PAs from the pmap. we also lock out the page daemon so that 1329 * we can call uvm_pageunwire. 1330 */ 1331 uvm_lock_pageq(); 1332 1333 /* find the beginning map entry for the region. */ 1334 KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map)); 1335 if (uvm_map_lookup_entry(map, start, &entry) == FALSE) 1336 panic("uvm_fault_unwire_locked: address not in map"); 1337 1338 for (va = start; va < end ; va += PAGE_SIZE) { 1339 if (pmap_extract(pmap, va, &pa) == FALSE) 1340 continue; 1341 1342 /* find the map entry for the current address. */ 1343 KASSERT(va >= entry->start); 1344 while (va >= entry->end) { 1345 next = RBT_NEXT(uvm_map_addr, entry); 1346 KASSERT(next != NULL && next->start <= entry->end); 1347 entry = next; 1348 } 1349 1350 /* if the entry is no longer wired, tell the pmap. 
*/ 1351 if (VM_MAPENT_ISWIRED(entry) == 0) 1352 pmap_unwire(pmap, va); 1353 1354 pg = PHYS_TO_VM_PAGE(pa); 1355 if (pg) 1356 uvm_pageunwire(pg); 1357 } 1358 1359 uvm_unlock_pageq(); 1360 } 1361 1362 /* 1363 * uvmfault_unlockmaps: unlock the maps 1364 */ 1365 void 1366 uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked) 1367 { 1368 /* 1369 * ufi can be NULL when this isn't really a fault, 1370 * but merely paging in anon data. 1371 */ 1372 if (ufi == NULL) { 1373 return; 1374 } 1375 1376 uvmfault_update_stats(ufi); 1377 if (write_locked) { 1378 vm_map_unlock(ufi->map); 1379 } else { 1380 vm_map_unlock_read(ufi->map); 1381 } 1382 } 1383 1384 /* 1385 * uvmfault_unlockall: unlock everything passed in. 1386 * 1387 * => maps must be read-locked (not write-locked). 1388 */ 1389 void 1390 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap, 1391 struct uvm_object *uobj, struct vm_anon *anon) 1392 { 1393 1394 uvmfault_unlockmaps(ufi, FALSE); 1395 } 1396 1397 /* 1398 * uvmfault_lookup: lookup a virtual address in a map 1399 * 1400 * => caller must provide a uvm_faultinfo structure with the IN 1401 * params properly filled in 1402 * => we will lookup the map entry (handling submaps) as we go 1403 * => if the lookup is a success we will return with the maps locked 1404 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only 1405 * get a read lock. 1406 * => note that submaps can only appear in the kernel and they are 1407 * required to use the same virtual addresses as the map they 1408 * are referenced by (thus address translation between the main 1409 * map and the submap is unnecessary). 1410 */ 1411 1412 boolean_t 1413 uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock) 1414 { 1415 vm_map_t tmpmap; 1416 1417 /* init ufi values for lookup. */ 1418 ufi->map = ufi->orig_map; 1419 ufi->size = ufi->orig_size; 1420 1421 /* 1422 * keep going down levels until we are done. note that there can 1423 * only be two levels so we won't loop very long. 1424 */ 1425 while (1) { 1426 if (ufi->orig_rvaddr < ufi->map->min_offset || 1427 ufi->orig_rvaddr >= ufi->map->max_offset) 1428 return(FALSE); 1429 1430 /* lock map */ 1431 if (write_lock) { 1432 vm_map_lock(ufi->map); 1433 } else { 1434 vm_map_lock_read(ufi->map); 1435 } 1436 1437 /* lookup */ 1438 if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr, 1439 &ufi->entry)) { 1440 uvmfault_unlockmaps(ufi, write_lock); 1441 return(FALSE); 1442 } 1443 1444 /* reduce size if necessary */ 1445 if (ufi->entry->end - ufi->orig_rvaddr < ufi->size) 1446 ufi->size = ufi->entry->end - ufi->orig_rvaddr; 1447 1448 /* 1449 * submap? replace map with the submap and lookup again. 1450 * note: VAs in submaps must match VAs in main map. 1451 */ 1452 if (UVM_ET_ISSUBMAP(ufi->entry)) { 1453 tmpmap = ufi->entry->object.sub_map; 1454 uvmfault_unlockmaps(ufi, write_lock); 1455 ufi->map = tmpmap; 1456 continue; 1457 } 1458 1459 /* got it! */ 1460 ufi->mapv = ufi->map->timestamp; 1461 return(TRUE); 1462 1463 } 1464 /*NOTREACHED*/ 1465 } 1466 1467 /* 1468 * uvmfault_relock: attempt to relock the same version of the map 1469 * 1470 * => fault data structures should be unlocked before calling. 1471 * => if a success (TRUE) maps will be locked after call. 1472 */ 1473 boolean_t 1474 uvmfault_relock(struct uvm_faultinfo *ufi) 1475 { 1476 /* 1477 * ufi can be NULL when this isn't really a fault, 1478 * but merely paging in anon data. 
 */
	if (ufi == NULL) {
		return TRUE;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.  fail if version mismatch (in which case nothing
	 * gets locked).
	 */
	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}
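
/*
 * Illustrative sketch (not part of the original file, and kept under
 * "#if 0" so it is never compiled): how the map version-number protocol
 * implemented by uvmfault_lookup()/uvmfault_relock() above is typically
 * used by a fault path that must drop all locks to sleep for synchronous
 * I/O.  The function name and the I/O placeholder are hypothetical; only
 * uvmfault_unlockall(), uvmfault_relock() and the VM_PAGER_* return
 * values come from this file.
 */
#if 0
static int
example_relock_after_io(struct uvm_faultinfo *ufi, struct vm_amap *amap)
{
	/*
	 * the faulting page is assumed to already be PG_BUSY, so it can
	 * not be torn down while every lock is dropped for the I/O.
	 */
	uvmfault_unlockall(ufi, amap, NULL, NULL);

	/* ... synchronous I/O would be performed here ... */

	/*
	 * relocking only succeeds if the map's timestamp is unchanged,
	 * i.e. no one write-locked (and possibly changed) the map while
	 * we slept.  otherwise the caller must restart from ReFault.
	 */
	if (uvmfault_relock(ufi) == FALSE)
		return (VM_PAGER_REFAULT);

	return (VM_PAGER_OK);
}
#endif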