/*	$NetBSD: uvm_fault.c,v 1.175 2010/06/22 18:34:50 rmind Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.175 2010/06/22 18:34:50 rmind Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write     +-cow_write/zero
 *         |             |                         |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ---------> new |          |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----+  |
 *      +-----+       +-----+                   +-----+       +-----+
 *
 * d/c = don't care
 *
 * case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 * case [1]: upper layer fault [anon active]
 *    1A: [read] or [write with anon->an_ref == 1]
 *	I/O takes place in upper level anon and uobj is not touched.
 *    1B: [write with anon->an_ref > 1]
 *	new anon is alloc'd and data is copied off ["COW"]
 *
 * case [2]: lower layer fault [uobj]
 *    2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *	I/O takes place directly in object.
 *    2B: [write to copy_on_write] or [read on NULL uobj]
 *	data is "promoted" from uobj to a new anon.
 *	if uobj is null, then we zero fill.
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and do map it if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *	       fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 *   alternative 1: unbusy the page in question and restart the page fault
 * from the top (ReFault).   this is easy but does not take advantage
 * of the information that we already have from our previous lookup,
 * although it is possible that the "hints" in the vm_map will help here.
 *
 *   alternative 2: the system already keeps track of a "version" number of
 * a map.   [i.e. every time you write-lock a map (e.g. to change a
 * mapping) you bump the version number up by one...]   so, we can save
 * the version number of the map before we release the lock and start I/O.
 * then when I/O is done we can relock and check the version numbers
 * to see if anything changed.   this might save us something over
 * alternative 1 because we don't have to unbusy the page and may need
 * fewer compares.
 *
 *   alternative 3: put in backpointers or a way to "hold" part of a map
 * in place while I/O is in progress.   this could be complex to
 * implement (especially with structures like amap that can be referenced
 * by multiple map entries, and figuring out what should wait could be
 * complex as well...).
 *
 * we use alternative 2.  given that we are multi-threaded now we may want
 * to reconsider the choice.
 */

/*
 * local data structures
 */

struct uvm_advice {
        int advice;
        int nback;
        int nforw;
};

/*
 * page range array:
 * note: index in array must match "advice" value
 * XXX: borrowed numbers from freebsd.   do they work well for us?
 */

static const struct uvm_advice uvmadvice[] = {
        { MADV_NORMAL, 3, 4 },
        { MADV_RANDOM, 0, 0 },
        { MADV_SEQUENTIAL, 8, 7 },
};

#define UVM_MAXRANGE 16	/* must be MAX() of nback+nforw+1 */
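
/*
 * illustrative arithmetic (a worked check, not part of the original
 * source): UVM_MAXRANGE must cover the largest nback+nforw+1 in
 * uvmadvice[] above:
 *
 *	MADV_NORMAL:		3 + 4 + 1 =  8
 *	MADV_RANDOM:		0 + 0 + 1 =  1
 *	MADV_SEQUENTIAL:	8 + 7 + 1 = 16  <= UVM_MAXRANGE (16)
 *
 * the anons_store[]/pages_store[] arrays in uvm_fault_internal() are
 * sized by UVM_MAXRANGE, so growing any advice range means growing
 * UVM_MAXRANGE too.
 */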

/*
 * private prototypes
 */

/*
 * inline functions
 */

/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */

static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
        int lcv;
        struct vm_page *pg;

        for (lcv = 0; lcv < n; lcv++) {
                if (anons[lcv] == NULL)
                        continue;
                mutex_enter(&anons[lcv]->an_lock);
                pg = anons[lcv]->an_page;
                if (pg && (pg->flags & PG_BUSY) == 0) {
                        mutex_enter(&uvm_pageqlock);
                        if (pg->wire_count == 0) {
                                uvm_pagedeactivate(pg);
                        }
                        mutex_exit(&uvm_pageqlock);
                }
                mutex_exit(&anons[lcv]->an_lock);
        }
}

/*
 * normal functions
 */

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */

static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
        for (;;) {

                /*
                 * no mapping?  give up.
                 */

                if (uvmfault_lookup(ufi, true) == false)
                        return;

                /*
                 * copy if needed.
                 */

                if (UVM_ET_ISNEEDSCOPY(ufi->entry))
                        amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
                            ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

                /*
                 * didn't work?  must be out of RAM.   unlock and sleep.
                 */

                if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
                        uvmfault_unlockmaps(ufi, true);
                        uvm_wait("fltamapcopy");
                        continue;
                }

                /*
                 * got it!   unlock and return.
                 */

                uvmfault_unlockmaps(ufi, true);
                return;
        }
        /*NOTREACHED*/
}

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => maps, amap, and anon locked by caller.
 * => if we fail (result != 0) we unlock everything.
 * => if we are successful, we return with everything still locked.
 * => we don't move the page on the queues [gets moved later]
 * => if we allocate a new page [we_own], it gets put on the queues.
 *    either way, the result is that the page is on the queues at return time
 * => for pages which are on loan from a uvm_object (and thus are not
 *    owned by the anon): if successful, we return with the owning object
 *    locked.   the caller must unlock this object when it unlocks everything
 *    else.
 */

int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
        bool we_own;    /* we own anon's page? */
        bool locked;    /* did we relock? */
        struct vm_page *pg;
        int error;
        UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);

        KASSERT(mutex_owned(&anon->an_lock));

        error = 0;
        uvmexp.fltanget++;
        /* bump rusage counters */
        if (anon->an_page)
                curlwp->l_ru.ru_minflt++;
        else
                curlwp->l_ru.ru_majflt++;

        /*
         * loop until we get it, or fail.
         */

        for (;;) {
                we_own = false; /* true if we set PG_BUSY on a page */
                pg = anon->an_page;
                /*
                 * if there is a resident page and it is loaned, then anon
                 * may not own it.   call out to uvm_anon_lockloanpg() to
                 * ensure the real owner of the page has been identified
                 * and locked.
                 */

                if (pg && pg->loan_count)
                        pg = uvm_anon_lockloanpg(anon);

                /*
                 * page there?   make sure it is not busy/released.
                 */

                if (pg) {

                        /*
                         * at this point, if the page has a uobject [meaning
                         * we have it on loan], then that uobject is locked
                         * by us!   if the page is busy, we drop all the
                         * locks (including uobject) and try again.
                         */

                        if ((pg->flags & PG_BUSY) == 0) {
                                UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
                                return (0);
                        }
                        pg->flags |= PG_WANTED;
                        uvmexp.fltpgwait++;

                        /*
                         * the last unlock must be an atomic unlock+wait on
                         * the owner of page
                         */

                        if (pg->uobject) {      /* owner is uobject ? */
                                uvmfault_unlockall(ufi, amap, NULL, anon);
                                UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
                                    0,0,0);
                                UVM_UNLOCK_AND_WAIT(pg,
                                    &pg->uobject->vmobjlock,
                                    false, "anonget1", 0);
                        } else {
                                /* anon owns page */
                                uvmfault_unlockall(ufi, amap, NULL, NULL);
                                UVMHIST_LOG(maphist, " unlock+wait on anon",0,
                                    0,0,0);
                                UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, false,
                                    "anonget2", 0);
                        }
                } else {
#if defined(VMSWAP)

                        /*
                         * no page, we must try and bring it in.
                         */

                        pg = uvm_pagealloc(NULL, 0, anon, 0);
                        if (pg == NULL) {       /* out of RAM.  */
                                uvmfault_unlockall(ufi, amap, NULL, anon);
                                uvmexp.fltnoram++;
                                UVMHIST_LOG(maphist, "  noram -- UVM_WAIT",0,
                                    0,0,0);
                                if (!uvm_reclaimable()) {
                                        return ENOMEM;
                                }
                                uvm_wait("flt_noram1");
                        } else {
                                /* we set the PG_BUSY bit */
                                we_own = true;
                                uvmfault_unlockall(ufi, amap, NULL, anon);

                                /*
                                 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
                                 * page into the uvm_swap_get function with
                                 * all data structures unlocked.  note that
                                 * it is ok to read an_swslot here because
                                 * we hold PG_BUSY on the page.
                                 */
                                uvmexp.pageins++;
                                error = uvm_swap_get(pg, anon->an_swslot,
                                    PGO_SYNCIO);

                                /*
                                 * we clean up after the i/o below in the
                                 * "we_own" case
                                 */
                        }
#else /* defined(VMSWAP) */
                        panic("%s: no page", __func__);
#endif /* defined(VMSWAP) */
                }

                /*
                 * now relock and try again
                 */

                locked = uvmfault_relock(ufi);
                if (locked && amap != NULL) {
                        amap_lock(amap);
                }
                if (locked || we_own)
                        mutex_enter(&anon->an_lock);

                /*
                 * if we own the page (i.e. we set PG_BUSY), then we need
                 * to clean up after the I/O.  there are three cases to
                 * consider:
                 *   [1] page released during I/O: free anon and ReFault.
                 *   [2] I/O not OK.   free the page and cause the fault
                 *       to fail.
                 *   [3] I/O OK!   activate the page and sync with the
                 *       non-we_own case (i.e. drop anon lock if not locked).
                 */

                if (we_own) {
#if defined(VMSWAP)
                        if (pg->flags & PG_WANTED) {
                                wakeup(pg);
                        }
                        if (error) {

                                /*
                                 * remove the swap slot from the anon
                                 * and mark the anon as having no real slot.
                                 * don't free the swap slot, thus preventing
                                 * it from being used again.
                                 */

                                if (anon->an_swslot > 0)
                                        uvm_swap_markbad(anon->an_swslot, 1);
                                anon->an_swslot = SWSLOT_BAD;

                                if ((pg->flags & PG_RELEASED) != 0)
                                        goto released;

                                /*
                                 * note: page was never !PG_BUSY, so it
                                 * can't be mapped and thus no need to
                                 * pmap_page_protect it...
                                 */

                                mutex_enter(&uvm_pageqlock);
                                uvm_pagefree(pg);
                                mutex_exit(&uvm_pageqlock);

                                if (locked)
                                        uvmfault_unlockall(ufi, amap, NULL,
                                            anon);
                                else
                                        mutex_exit(&anon->an_lock);
                                UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
                                return error;
                        }

                        if ((pg->flags & PG_RELEASED) != 0) {
released:
                                KASSERT(anon->an_ref == 0);

                                /*
                                 * released while we unlocked amap.
                                 */

                                if (locked)
                                        uvmfault_unlockall(ufi, amap, NULL,
                                            NULL);

                                uvm_anon_release(anon);

                                if (error) {
                                        UVMHIST_LOG(maphist,
                                            "<- ERROR/RELEASED", 0,0,0,0);
                                        return error;
                                }

                                UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
                                return ERESTART;
                        }

                        /*
                         * we've successfully read the page, activate it.
                         */

                        mutex_enter(&uvm_pageqlock);
                        uvm_pageactivate(pg);
                        mutex_exit(&uvm_pageqlock);
                        pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
                        UVM_PAGE_OWN(pg, NULL);
                        if (!locked)
                                mutex_exit(&anon->an_lock);
#else /* defined(VMSWAP) */
                        panic("%s: we_own", __func__);
#endif /* defined(VMSWAP) */
                }

                /*
                 * we were not able to relock.   restart fault.
                 */

                if (!locked) {
                        UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
                        return (ERESTART);
                }

                /*
                 * verify no one has touched the amap and moved the anon on us.
                 */

                if (ufi != NULL &&
                    amap_lookup(&ufi->entry->aref,
                        ufi->orig_rvaddr - ufi->entry->start) != anon) {

                        uvmfault_unlockall(ufi, amap, NULL, anon);
                        UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
                        return (ERESTART);
                }

                /*
                 * try it again!
                 */

                uvmexp.fltanretry++;
                continue;
        }
        /*NOTREACHED*/
}
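
/*
 * illustrative caller sketch (restating the contract above, not part of
 * the original source): uvm_fault_upper() consumes uvmfault_anonget()
 * like this:
 *
 *	error = uvmfault_anonget(ufi, amap, anon);
 *	switch (error) {
 *	case 0:
 *		break;			(everything still locked)
 *	case ERESTART:
 *		return ERESTART;	(locks dropped; refault)
 *	default:
 *		return error;		(locks dropped; fault fails)
 *	}
 */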

/*
 * uvmfault_promote: promote data to a new anon.  used for 1B and 2B.
 *
 *	1. allocate an anon and a page.
 *	2. fill its contents.
 *	3. put it into amap.
 *
 * => if we fail (result != 0) we unlock everything.
 * => on success, return a new locked anon via 'nanon'.
 *    (*nanon)->an_page will be a resident, locked, dirty page.
 */

static int
uvmfault_promote(struct uvm_faultinfo *ufi,
    struct vm_anon *oanon,
    struct vm_page *uobjpage,
    struct vm_anon **nanon, /* OUT: allocated anon */
    struct vm_anon **spare)
{
        struct vm_amap *amap = ufi->entry->aref.ar_amap;
        struct uvm_object *uobj;
        struct vm_anon *anon;
        struct vm_page *pg;
        struct vm_page *opg;
        int error;
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        if (oanon) {
                /* anon COW */
                opg = oanon->an_page;
                KASSERT(opg != NULL);
                KASSERT(opg->uobject == NULL || opg->loan_count > 0);
        } else if (uobjpage != PGO_DONTCARE) {
                /* object-backed COW */
                opg = uobjpage;
        } else {
                /* ZFOD */
                opg = NULL;
        }
        if (opg != NULL) {
                uobj = opg->uobject;
        } else {
                uobj = NULL;
        }

        KASSERT(amap != NULL);
        KASSERT(uobjpage != NULL);
        KASSERT(uobjpage == PGO_DONTCARE || (uobjpage->flags & PG_BUSY) != 0);
        KASSERT(mutex_owned(&amap->am_l));
        KASSERT(oanon == NULL || mutex_owned(&oanon->an_lock));
        KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
#if 0
        KASSERT(*spare == NULL || !mutex_owned(&(*spare)->an_lock));
#endif

        if (*spare != NULL) {
                anon = *spare;
                *spare = NULL;
                mutex_enter(&anon->an_lock);
        } else if (ufi->map != kernel_map) {
                anon = uvm_analloc();
        } else {
                UVMHIST_LOG(maphist, "kernel_map, unlock and retry", 0,0,0,0);

                /*
                 * we can't allocate anons with kernel_map locked.
                 */

                uvm_page_unbusy(&uobjpage, 1);
                uvmfault_unlockall(ufi, amap, uobj, oanon);

                *spare = uvm_analloc();
                if (*spare == NULL) {
                        goto nomem;
                }
                mutex_exit(&(*spare)->an_lock);
                error = ERESTART;
                goto done;
        }
        if (anon) {

                /*
                 * The new anon is locked.
                 *
                 * if opg == NULL, we want a zero'd, dirty page,
                 * so have uvm_pagealloc() do that for us.
                 */

                pg = uvm_pagealloc(NULL, 0, anon,
                    (opg == NULL) ? UVM_PGA_ZERO : 0);
        } else {
                pg = NULL;
        }

        /*
         * out of memory resources?
         */

        if (pg == NULL) {
                /* save anon for the next try. */
                if (anon != NULL) {
                        mutex_exit(&anon->an_lock);
                        *spare = anon;
                }

                /* unlock and fail ... */
                uvm_page_unbusy(&uobjpage, 1);
                uvmfault_unlockall(ufi, amap, uobj, oanon);
nomem:
                if (!uvm_reclaimable()) {
                        UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
                        uvmexp.fltnoanon++;
                        error = ENOMEM;
                        goto done;
                }

                UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
                uvmexp.fltnoram++;
                uvm_wait("flt_noram5");
                error = ERESTART;
                goto done;
        }

        /* copy page [pg now dirty] */
        if (opg) {
                uvm_pagecopy(opg, pg);
        }

        amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
            oanon != NULL);

        *nanon = anon;
        error = 0;
done:
        return error;
}


/*
 *   F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (c.f.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked off during I/O.
 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
 */

#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)
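
/*
 * illustrative use of MASK() (restating how the macro is applied below,
 * not part of the original source): when asking the pager for pages of
 * a copy-on-write mapping we must never request write access on the
 * underlying object, even on a write fault (the write goes into a
 * promoted anon page instead):
 *
 *	access = flt->access_type & MASK(ufi->entry);
 *	... pgo_get(uobj, off, pages, &gotpages, centeridx, access, ...);
 *
 * for a non-COW entry MASK() is VM_PROT_ALL, so the access type passes
 * through unchanged.
 */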

/* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
#define UVM_FAULT_WIRE		(1 << 0)
#define UVM_FAULT_MAXPROT	(1 << 1)

struct uvm_faultctx {
        vm_prot_t access_type;          /* access requested by the fault */
        vm_prot_t enter_prot;           /* protection to enter pages with */
        vaddr_t startva;                /* start of the range of interest */
        int npages;                     /* number of pages in the range */
        int centeridx;                  /* index of the faulting page */
        struct vm_anon *anon_spare;     /* spare anon kept across retries */
        bool wire_mapping;              /* wire the mapping (PMAP_WIRED) */
        bool narrow;                    /* don't map neighbor pages */
        bool wire_paging;               /* wire the page (uvm_pagewire) */
        bool maxprot;                   /* check against max_protection */
        bool cow_now;                   /* fault needs copy-on-write now */
        bool promote;                   /* promote data to a new anon */
};

static inline int	uvm_fault_check(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon ***, struct vm_page ***);

static int		uvm_fault_upper(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon **);
static inline int	uvm_fault_upper_lookup(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon **, struct vm_page **);
static inline void	uvm_fault_upper_neighbor(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    vaddr_t, struct vm_page *, bool);
static inline int	uvm_fault_upper_loan(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon *, struct uvm_object **);
static inline int	uvm_fault_upper_promote(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *);
static inline int	uvm_fault_upper_direct(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *);
static int		uvm_fault_upper_enter(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *,
			    struct vm_page *, struct vm_anon *);
static inline void	uvm_fault_upper_done(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *,
			    struct vm_page *);

static int		uvm_fault_lower(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_page **);
static inline void	uvm_fault_lower_lookup(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_page **);
static inline void	uvm_fault_lower_neighbor(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    vaddr_t, struct vm_page *, bool);
static inline int	uvm_fault_lower_io(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object **, struct vm_page **);
static inline int	uvm_fault_lower_direct(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page *);
static inline int	uvm_fault_lower_direct_loan(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page **,
			    struct vm_page **);
static inline int	uvm_fault_lower_promote(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page *);
static int		uvm_fault_lower_enter(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *,
			    struct vm_anon *, struct vm_page *,
			    struct vm_page *);
static inline void	uvm_fault_lower_done(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *,
			    struct vm_page *);

int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
    vm_prot_t access_type, int fault_flag)
{
        struct uvm_faultinfo ufi;
        struct uvm_faultctx flt = {
                .access_type = access_type,

                /* don't look for neighborhood pages on "wire" fault */
                .narrow = (fault_flag & UVM_FAULT_WIRE) != 0,

                /* "wire" fault causes wiring of both mapping and paging */
                .wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
                .wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,

                .maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0,
        };
        struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
        struct vm_page *pages_store[UVM_MAXRANGE], **pages;
        int error;
        UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, at=%d, ff=%d)",
              orig_map, vaddr, access_type, fault_flag);

        uvmexp.faults++;        /* XXX: locking? */

        /*
         * init the IN parameters in the ufi
         */

        ufi.orig_map = orig_map;
        ufi.orig_rvaddr = trunc_page(vaddr);
        ufi.orig_size = PAGE_SIZE;      /* can't get any smaller than this */

        error = ERESTART;
        while (error == ERESTART) {
                anons = anons_store;
                pages = pages_store;

                error = uvm_fault_check(&ufi, &flt, &anons, &pages);
                if (error != 0)
                        continue;

                error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
                if (error != 0)
                        continue;

                if (pages[flt.centeridx] == PGO_DONTCARE)
                        error = uvm_fault_upper(&ufi, &flt, anons);
                else {
                        struct uvm_object * const uobj =
                            ufi.entry->object.uvm_obj;

                        if (uobj && uobj->pgops->pgo_fault != NULL) {
                                /*
                                 * invoke "special" fault routine.
                                 */
                                mutex_enter(&uobj->vmobjlock);
                                /* locked: maps(read), amap(if there), uobj */
                                error = uobj->pgops->pgo_fault(&ufi,
                                    flt.startva, pages, flt.npages,
                                    flt.centeridx, flt.access_type,
                                    PGO_LOCKED|PGO_SYNCIO);

                                /*
                                 * locked: nothing, pgo_fault has unlocked
                                 * everything
                                 */

                                /*
                                 * object fault routine responsible for
                                 * pmap_update().
                                 */
                        } else {
                                error = uvm_fault_lower(&ufi, &flt, pages);
                        }
                }
        }

        if (flt.anon_spare != NULL) {
                flt.anon_spare->an_ref--;
                uvm_anfree(flt.anon_spare);
        }
        return error;
}
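
/*
 * illustrative control-flow summary (restating the loop above, not part
 * of the original source): every step of the fault returns one of
 *
 *	0		fault resolved; uvm_fault_internal() returns 0
 *	ERESTART	locks were dropped; the loop retries from
 *			uvm_fault_check() ("ReFault")
 *	other error	fault failed; returned to the MD caller
 *
 * uvmfault_relock() implements the map "version number" check
 * (alternative 2 in the comment at the top of this file); when that
 * re-verification fails after I/O, the step reports ERESTART and this
 * loop refaults rather than trusting stale state.
 */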

/*
 * uvm_fault_check: check prot, handle needs-copy, etc.
 *
 *	1. lookup entry.
 *	2. check protection.
 *	3. adjust fault condition (mainly for simulated fault).
 *	4. handle needs-copy (lazy amap copy).
 *	5. establish range of interest for neighbor fault (aka pre-fault).
 *	6. look up anons (if amap exists).
 *	7. flush pages (if MADV_SEQUENTIAL)
 *
 * => called with nothing locked.
 * => if we fail (result != 0) we unlock everything.
 */

static int
uvm_fault_check(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_anon ***ranons, struct vm_page ***rpages)
{
        struct vm_amap *amap;
        struct uvm_object *uobj;
        vm_prot_t check_prot;
        int nback, nforw;
        UVMHIST_FUNC("uvm_fault_check"); UVMHIST_CALLED(maphist);

        /*
         * lookup and lock the maps
         */

        if (uvmfault_lookup(ufi, false) == false) {
                UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", ufi->orig_rvaddr,
                    0,0,0);
                return EFAULT;
        }
        /* locked: maps(read) */

#ifdef DIAGNOSTIC
        if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
                printf("Page fault on non-pageable map:\n");
                printf("ufi->map = %p\n", ufi->map);
                printf("ufi->orig_map = %p\n", ufi->orig_map);
                printf("ufi->orig_rvaddr = 0x%lx\n", (u_long) ufi->orig_rvaddr);
                panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
        }
#endif

        /*
         * check protection
         */

        check_prot = flt->maxprot ?
            ufi->entry->max_protection : ufi->entry->protection;
        if ((check_prot & flt->access_type) != flt->access_type) {
                UVMHIST_LOG(maphist,
                    "<- protection failure (prot=0x%x, access=0x%x)",
                    ufi->entry->protection, flt->access_type, 0, 0);
                uvmfault_unlockmaps(ufi, false);
                return EACCES;
        }

        /*
         * "enter_prot" is the protection we want to enter the page with.
         * for certain pages (e.g. copy-on-write pages) this protection can
         * be more strict than ufi->entry->protection.   "wired" means either
         * the entry is wired or we are fault-wiring the page.
         */

        flt->enter_prot = ufi->entry->protection;
        if (VM_MAPENT_ISWIRED(ufi->entry))
                flt->wire_mapping = true;

        if (flt->wire_mapping) {
                flt->access_type = flt->enter_prot; /* full access for wired */
                flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
        } else {
                flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
        }

        flt->promote = false;

        /*
         * handle "needs_copy" case.   if we need to copy the amap we will
         * have to drop our readlock and relock it with a write lock.   (we
         * need a write lock to change anything in a map entry [e.g.
         * needs_copy]).
         */

        if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
                if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
                        KASSERT(!flt->maxprot);
                        /* need to clear */
                        UVMHIST_LOG(maphist,
                            "  need to clear needs_copy and refault",0,0,0,0);
                        uvmfault_unlockmaps(ufi, false);
                        uvmfault_amapcopy(ufi);
                        uvmexp.fltamcopy++;
                        return ERESTART;

                } else {

                        /*
                         * ensure that we pmap_enter page R/O since
                         * needs_copy is still true
                         */

                        flt->enter_prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * identify the players
         */

        amap = ufi->entry->aref.ar_amap;        /* upper layer */
        uobj = ufi->entry->object.uvm_obj;      /* lower layer */

        /*
         * check for a case 0 fault.   if nothing backing the entry then
         * error now.
         */

        if (amap == NULL && uobj == NULL) {
                uvmfault_unlockmaps(ufi, false);
                UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
                return EFAULT;
        }

        /*
         * establish range of interest based on advice from mapper
         * and then clip to fit map entry.   note that we only want
         * to do this the first time through the fault.   if we
         * ReFault we will disable this by setting "narrow" to true.
         */

        if (flt->narrow == false) {

                /* wide fault (!narrow) */
                KASSERT(uvmadvice[ufi->entry->advice].advice ==
                    ufi->entry->advice);
                nback = MIN(uvmadvice[ufi->entry->advice].nback,
                    (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
                flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
                nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
                    ((ufi->entry->end - ufi->orig_rvaddr) >>
                    PAGE_SHIFT) - 1);
                /*
                 * note: "-1" because we don't want to count the
                 * faulting page as forw
                 */
                flt->npages = nback + nforw + 1;
                flt->centeridx = nback;

                flt->narrow = true;     /* ensure only once per-fault */

        } else {

                /* narrow fault! */
                nback = nforw = 0;
                flt->startva = ufi->orig_rvaddr;
                flt->npages = 1;
                flt->centeridx = 0;

        }
        /* offset from entry's start to pgs' start */
        const voff_t eoff = flt->startva - ufi->entry->start;

        /* locked: maps(read) */
        UVMHIST_LOG(maphist, "  narrow=%d, back=%d, forw=%d, startva=0x%x",
            flt->narrow, nback, nforw, flt->startva);
        UVMHIST_LOG(maphist, "  entry=0x%x, amap=0x%x, obj=0x%x", ufi->entry,
            amap, uobj, 0);

        /*
         * if we've got an amap, lock it and extract current anons.
         */

        if (amap) {
                amap_lock(amap);
                amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
        } else {
                *ranons = NULL; /* to be safe */
        }

        /* locked: maps(read), amap(if there) */
        KASSERT(amap == NULL || mutex_owned(&amap->am_l));

        /*
         * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
         * now and then forget about them (for the rest of the fault).
         */

        if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {

                UVMHIST_LOG(maphist, "  MADV_SEQUENTIAL: flushing backpages",
                    0,0,0,0);
                /* flush back-page anons? */
                if (amap)
                        uvmfault_anonflush(*ranons, nback);

                /* flush object? */
                if (uobj) {
                        voff_t uoff;

                        uoff = ufi->entry->offset + eoff;
                        mutex_enter(&uobj->vmobjlock);
                        (void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
                            (nback << PAGE_SHIFT), PGO_DEACTIVATE);
                }

                /* now forget about the backpages */
                if (amap)
                        *ranons += nback;
                flt->startva += (nback << PAGE_SHIFT);
                flt->npages -= nback;
                flt->centeridx = 0;
        }
        /*
         * => startva is fixed
         * => npages is fixed
         */

        return 0;
}
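
/*
 * illustrative range arithmetic (restating the MIN() clipping above,
 * not part of the original source): for a first (wide) fault at va in a
 * MADV_NORMAL entry with plenty of room on both sides, uvmadvice[]
 * gives nback = 3 and nforw = 4, so:
 *
 *	startva   = va - 3 * PAGE_SIZE
 *	npages    = 3 + 4 + 1 = 8
 *	centeridx = 3
 *
 * when va is near the start or end of the map entry, the MIN() clipping
 * shrinks nback and nforw accordingly.
 */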

/*
 * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
 *
 * iterate range of interest:
 *	1. check if h/w mapping exists.  if yes, we don't care
 *	2. check if anon exists.  if not, page is lower.
 *	3. if anon exists, enter h/w mapping for neighbors.
 *
 * => called with amap locked (if exists).
 */

static int
uvm_fault_upper_lookup(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_anon **anons, struct vm_page **pages)
{
        struct vm_amap *amap = ufi->entry->aref.ar_amap;
        int lcv;
        vaddr_t currva;
        bool shadowed;
        UVMHIST_FUNC("uvm_fault_upper_lookup"); UVMHIST_CALLED(maphist);

        /* locked: maps(read), amap(if there) */
        KASSERT(amap == NULL || mutex_owned(&amap->am_l));

        /*
         * map in the backpages and frontpages we found in the amap in hopes
         * of preventing future faults.    we also init the pages[] array as
         * we go.
         */

        currva = flt->startva;
        shadowed = false;
        for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
                /*
                 * don't play with VAs that are already mapped
                 * (except for the center page)
                 */
                if (lcv != flt->centeridx &&
                    pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
                        pages[lcv] = PGO_DONTCARE;
                        continue;
                }

                /*
                 * unmapped or center page.   check if any anon at this level.
                 */
                if (amap == NULL || anons[lcv] == NULL) {
                        pages[lcv] = NULL;
                        continue;
                }

                /*
                 * check for present page and map if possible.   re-activate it.
                 */

                pages[lcv] = PGO_DONTCARE;
                if (lcv == flt->centeridx) {    /* save center for later! */
                        shadowed = true;
                } else {
                        struct vm_anon *anon = anons[lcv];

                        mutex_enter(&anon->an_lock);
                        struct vm_page *pg = anon->an_page;

                        /* ignore loaned and busy pages */
                        if (pg != NULL && pg->loan_count == 0 &&
                            (pg->flags & PG_BUSY) == 0)
                                uvm_fault_upper_neighbor(ufi, flt, currva,
                                    pg, anon->an_ref > 1);
                        mutex_exit(&anon->an_lock);
                }
        }

        /* locked: maps(read), amap(if there) */
        KASSERT(amap == NULL || mutex_owned(&amap->am_l));
        /* (shadowed == true) if there is an anon at the faulting address */
        UVMHIST_LOG(maphist, "  shadowed=%d, will_get=%d", shadowed,
            (ufi->entry->object.uvm_obj && shadowed != false),0,0);

        /*
         * note that if we are really short of RAM we could sleep in the above
         * call to pmap_enter with everything locked.   bad?
         *
         * XXX Actually, that is bad; pmap_enter() should just fail in that
         * XXX case.  --thorpej
         */

        return 0;
}
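
/*
 * illustrative summary of the pages[] encoding established above
 * (restating the code, not part of the original source):
 *
 *	pages[lcv] == PGO_DONTCARE	VA already mapped, or an anon
 *					shadows it (center page included)
 *	pages[lcv] == NULL		no anon here; the lower (uobj)
 *					layer must supply the page
 *
 * uvm_fault_internal() dispatches on pages[centeridx]: PGO_DONTCARE
 * means an upper layer (case 1) fault, anything else a lower layer
 * (case 2) fault.
 */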

/*
 * uvm_fault_upper_neighbor: enter single upper-layer neighbor page.
 *
 * => called with amap and anon locked.
 */

static void
uvm_fault_upper_neighbor(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        vaddr_t currva, struct vm_page *pg, bool readonly)
{
        UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);

        /* locked: amap, anon */

        mutex_enter(&uvm_pageqlock);
        uvm_pageenqueue(pg);
        mutex_exit(&uvm_pageqlock);
        UVMHIST_LOG(maphist,
            "  MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
            ufi->orig_map->pmap, currva, pg, 0);
        uvmexp.fltnamap++;

        /*
         * Since this page isn't the page that's actually faulting,
         * ignore pmap_enter() failures; it's not critical that we
         * enter these right now.
         */

        (void) pmap_enter(ufi->orig_map->pmap, currva,
            VM_PAGE_TO_PHYS(pg),
            readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
            flt->enter_prot,
            PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));

        pmap_update(ufi->orig_map->pmap);
}

/*
 * uvm_fault_upper: handle upper fault.
 *
 *	1. acquire anon lock.
 *	2. get anon.  let uvmfault_anonget do the dirty work.
 *	3. handle loan.
 *	4. dispatch direct or promote handlers.
 */

static int
uvm_fault_upper(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_anon **anons)
{
        struct vm_amap * const amap = ufi->entry->aref.ar_amap;
        struct vm_anon * const anon = anons[flt->centeridx];
        struct uvm_object *uobj;
        int error;
        UVMHIST_FUNC("uvm_fault_upper"); UVMHIST_CALLED(maphist);

        /* locked: maps(read), amap */
        KASSERT(mutex_owned(&amap->am_l));

        /*
         * handle case 1: fault on an anon in our amap
         */

        UVMHIST_LOG(maphist, "  case 1 fault: anon=0x%x", anon, 0,0,0);
        mutex_enter(&anon->an_lock);

        /* locked: maps(read), amap, anon */
        KASSERT(mutex_owned(&amap->am_l));
        KASSERT(mutex_owned(&anon->an_lock));

        /*
         * no matter if we have case 1A or case 1B we are going to need to
         * have the anon's memory resident.   ensure that now.
         */

        /*
         * let uvmfault_anonget do the dirty work.
         * if it fails (!OK) it will unlock everything for us.
         * if it succeeds, locks are still valid and locked.
         * also, if it is OK, then the anon's page is on the queues.
         * if the page is on loan from a uvm_object, then anonget will
         * lock that object for us if it does not fail.
         */

        error = uvmfault_anonget(ufi, amap, anon);
        switch (error) {
        case 0:
                break;

        case ERESTART:
                return ERESTART;

        case EAGAIN:
                kpause("fltagain1", false, hz/2, NULL);
                return ERESTART;

        default:
                return error;
        }

        /*
         * uobj is non null if the page is on loan from an object (i.e. uobj)
         */

        uobj = anon->an_page->uobject;  /* locked by anonget if !NULL */

        /* locked: maps(read), amap, anon, uobj(if one) */
        KASSERT(mutex_owned(&amap->am_l));
        KASSERT(mutex_owned(&anon->an_lock));
        KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));

        /*
         * special handling for loaned pages
         */

        if (anon->an_page->loan_count) {
                error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
                if (error != 0)
                        return error;
        }

        /*
         * if we are case 1B then we will need to allocate a new blank
         * anon to transfer the data into.   note that we have a lock
         * on anon, so no one can busy or release the page until we are done.
         * also note that the ref count can't drop to zero here because
         * it is > 1 and we are only dropping one ref.
         *
         * in the (hopefully very rare) case that we are out of RAM we
         * will unlock, wait for more RAM, and refault.
         *
         * if we are out of anon VM we kill the process (XXX: could wait?).
         */

        if (flt->cow_now && anon->an_ref > 1) {
                flt->promote = true;
                error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
        } else {
                error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
        }
        return error;
}
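
/*
 * illustrative dispatch rule for the upper layer (restating the test
 * above, not part of the original source):
 *
 *	write fault && anon->an_ref > 1	-> case 1B:
 *					   uvm_fault_upper_promote()
 *					   (copy into a new anon, "COW")
 *	otherwise			-> case 1A:
 *					   uvm_fault_upper_direct()
 *					   (map the existing anon page)
 *
 * a read fault on a shared anon stays 1A; uvm_fault_upper_direct() just
 * strips VM_PROT_WRITE from enter_prot, so a later write refaults and
 * takes the 1B path then.
 */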

/*
 * uvm_fault_upper_loan: handle loaned upper page.
 *
 *	1. if not cow'ing now, just mark enter_prot as read-only.
 *	2. if cow'ing now, and if ref count is 1, break loan.
 */

static int
uvm_fault_upper_loan(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_anon *anon, struct uvm_object **ruobj)
{
        struct vm_amap * const amap = ufi->entry->aref.ar_amap;
        int error = 0;
        UVMHIST_FUNC("uvm_fault_upper_loan"); UVMHIST_CALLED(maphist);

        if (!flt->cow_now) {

                /*
                 * for read faults on loaned pages we just cap the
                 * protection at read-only.
                 */

                flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;

        } else {
                /*
                 * note that we can't allow writes into a loaned page!
                 *
                 * if we have a write fault on a loaned page in an
                 * anon then we need to look at the anon's ref count.
                 * if it is greater than one then we are going to do
                 * a normal copy-on-write fault into a new anon (this
                 * is not a problem).  however, if the reference count
                 * is one (a case where we would normally allow a
                 * write directly to the page) then we need to kill
                 * the loan before we continue.
                 */

                /* >1 case is already ok */
                if (anon->an_ref == 1) {
                        error = uvm_loanbreak_anon(anon, *ruobj);
                        if (error != 0) {
                                uvmfault_unlockall(ufi, amap, *ruobj, anon);
                                uvm_wait("flt_noram2");
                                return ERESTART;
                        }
                        /* if we were a loan receiver uobj is gone */
                        if (*ruobj)
                                *ruobj = NULL;
                }
        }
        return error;
}

/*
 * uvm_fault_upper_promote: promote upper page.
 *
 *	1. call uvmfault_promote.
 *	2. enqueue page.
 *	3. deref.
 *	4. pass page to uvm_fault_upper_enter.
 */

static int
uvm_fault_upper_promote(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_anon *anon)
{
        struct vm_anon * const oanon = anon;
        struct vm_page *pg;
        int error;
        UVMHIST_FUNC("uvm_fault_upper_promote"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "  case 1B: COW fault",0,0,0,0);
        uvmexp.flt_acow++;

        error = uvmfault_promote(ufi, oanon, PGO_DONTCARE,
            &anon, &flt->anon_spare);
        switch (error) {
        case 0:
                break;
        case ERESTART:
                return ERESTART;
        default:
                return error;
        }

        pg = anon->an_page;
        mutex_enter(&uvm_pageqlock);
        uvm_pageactivate(pg);
        mutex_exit(&uvm_pageqlock);
        pg->flags &= ~(PG_BUSY|PG_FAKE);
        UVM_PAGE_OWN(pg, NULL);

        /* deref: can not drop to zero here by defn! */
        oanon->an_ref--;

        /*
         * note: oanon is still locked, as is the new anon.  we
         * need to check for this later when we unlock oanon; if
         * oanon != anon, we'll have to unlock anon, too.
         */

        return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
}

/*
 * uvm_fault_upper_direct: handle direct fault.
 */

static int
uvm_fault_upper_direct(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_anon *anon)
{
        struct vm_anon * const oanon = anon;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_fault_upper_direct"); UVMHIST_CALLED(maphist);

        uvmexp.flt_anon++;
        pg = anon->an_page;
        if (anon->an_ref > 1)   /* disallow writes to ref > 1 anons */
                flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;

        return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
}

/*
 * uvm_fault_upper_enter: enter h/w mapping of upper page.
 */

static int
uvm_fault_upper_enter(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
        struct vm_anon *oanon)
{
        struct vm_amap * const amap = ufi->entry->aref.ar_amap;
        UVMHIST_FUNC("uvm_fault_upper_enter"); UVMHIST_CALLED(maphist);

        /* locked: maps(read), amap, oanon, anon(if different from oanon) */
        KASSERT(mutex_owned(&amap->am_l));
        KASSERT(mutex_owned(&anon->an_lock));
        KASSERT(mutex_owned(&oanon->an_lock));

        /*
         * now map the page in.
         */

        UVMHIST_LOG(maphist,
            "  MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
            ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
        if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
            VM_PAGE_TO_PHYS(pg),
            flt->enter_prot, flt->access_type | PMAP_CANFAIL |
            (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {

                /*
                 * No need to undo what we did; we can simply think of
                 * this as the pmap throwing away the mapping information.
                 *
                 * We do, however, have to go through the ReFault path,
                 * as the map may change while we're asleep.
                 */

                if (anon != oanon)
                        mutex_exit(&anon->an_lock);
                uvmfault_unlockall(ufi, amap, uobj, oanon);
                if (!uvm_reclaimable()) {
                        UVMHIST_LOG(maphist,
                            "<- failed.  out of VM",0,0,0,0);
                        /* XXX instrumentation */
                        return ENOMEM;
                }
                /* XXX instrumentation */
                uvm_wait("flt_pmfail1");
                return ERESTART;
        }

        uvm_fault_upper_done(ufi, flt, uobj, anon, pg);

        /*
         * done case 1!  finish up by unlocking everything and returning
         * success
         */

        if (anon != oanon) {
                mutex_exit(&anon->an_lock);
        }
        pmap_update(ufi->orig_map->pmap);
        uvmfault_unlockall(ufi, amap, uobj, oanon);
        return 0;
}

/*
 * uvm_fault_upper_done: queue upper center page.
 */

static void
uvm_fault_upper_done(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg)
{
        const bool wire_paging = flt->wire_paging;

        UVMHIST_FUNC("uvm_fault_upper_done"); UVMHIST_CALLED(maphist);

        /*
         * ... update the page queues.
         */

        mutex_enter(&uvm_pageqlock);
        if (wire_paging) {
                uvm_pagewire(pg);

                /*
                 * since the now-wired page cannot be paged out,
                 * release its swap resources for others to use.
                 * since an anon with no swap cannot be PG_CLEAN,
                 * clear its clean flag now.
                 */

                pg->flags &= ~(PG_CLEAN);

        } else {
                uvm_pageactivate(pg);
        }
        mutex_exit(&uvm_pageqlock);

        if (wire_paging) {
                uvm_anon_dropswap(anon);
        }
}

/*
 * uvm_fault_lower: handle lower fault.
 *
 *	1. check uobj
 *	1.1. if null, ZFOD.
 *	1.2. if not null, look up unmapped neighbor pages.
 *	2. for center page, check if promote.
 *	2.1. ZFOD always needs promotion.
 *	2.2. other uobjs, when entry is marked COW (usually MAP_PRIVATE vnode).
 *	3. if uobj is not ZFOD and page is not found, do i/o.
 *	4. dispatch either direct / promote fault.
 */

static int
uvm_fault_lower(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_page **pages)
{
#ifdef DIAGNOSTIC
        struct vm_amap *amap = ufi->entry->aref.ar_amap;
#endif
        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
        struct vm_page *uobjpage;
        int error;
        UVMHIST_FUNC("uvm_fault_lower"); UVMHIST_CALLED(maphist);

        /* locked: maps(read), amap(if there), uobj(if !null) */

        /*
         * now, if the desired page is not shadowed by the amap and we have
         * a backing object that does not have a special fault routine, then
         * we ask (with pgo_get) the object for resident pages that we care
         * about and attempt to map them in.  we do not let pgo_get block
         * (PGO_LOCKED).
         */

        if (uobj == NULL) {
                /* zero fill; we don't care about neighbor pages */
                uobjpage = NULL;
        } else {
                uvm_fault_lower_lookup(ufi, flt, pages);
                uobjpage = pages[flt->centeridx];
        }

        /*
         * locked: maps(read), amap(if there), uobj(if !null),
         * uobjpage(if !null)
         */
        KASSERT(amap == NULL || mutex_owned(&amap->am_l));
        KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
        KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);

        /*
         * note that at this point we are done with any front or back pages.
         * we are now going to focus on the center page (i.e. the one we've
         * faulted on).  if we have faulted on the upper (anon) layer
         * [i.e. case 1], then the anon we want is anons[centeridx] (we have
         * not touched it yet).  if we have faulted on the bottom (uobj)
         * layer [i.e. case 2] and the page was both present and available,
         * then we've got a pointer to it as "uobjpage" and we've already
         * made it BUSY.
         */

        /*
         * note that uobjpage can not be PGO_DONTCARE at this point.  we now
         * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
         * have a backing object, check and see if we are going to promote
         * the data up to an anon during the fault.
         */

        if (uobj == NULL) {
                uobjpage = PGO_DONTCARE;
                flt->promote = true;            /* always need anon here */
        } else {
                KASSERT(uobjpage != PGO_DONTCARE);
                flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
        }
        UVMHIST_LOG(maphist, "  case 2 fault: promote=%d, zfill=%d",
            flt->promote, (uobj == NULL), 0,0);

        /*
         * if uobjpage is not null then we do not need to do I/O to get the
         * uobjpage.
         *
         * if uobjpage is null, then we need to unlock and ask the pager to
         * get the data for us.   once we have the data, we need to reverify
         * the state of the world.   we are currently not holding any
         * resources.
         */

        if (uobjpage) {
                /* update rusage counters */
                curlwp->l_ru.ru_minflt++;
        } else {
                error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
                if (error != 0)
                        return error;
        }

        /*
         * locked:
         * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
         */
        KASSERT(amap == NULL || mutex_owned(&amap->am_l));
        KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
        KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);

        /*
         * notes:
         *  - at this point uobjpage can not be NULL
         *  - at this point uobjpage can not be PG_RELEASED (since we checked
         *    for it above)
         *  - at this point uobjpage could be PG_WANTED (handle later)
         */

        KASSERT(uobj == NULL || uobj == uobjpage->uobject);
        KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
            (uobjpage->flags & PG_CLEAN) != 0);

        if (flt->promote == false) {
                error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
        } else {
                error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
        }
        return error;
}

/*
 * uvm_fault_lower_lookup: look up on-memory uobj pages.
 *
 *	1. get on-memory pages.
 *	2. if failed, give up (get only center page later).
 *	3. if succeeded, enter h/w mapping of neighbor pages.
 */

static void
uvm_fault_lower_lookup(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_page **pages)
{
        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
        int lcv, gotpages;
        vaddr_t currva;
        UVMHIST_FUNC("uvm_fault_lower_lookup"); UVMHIST_CALLED(maphist);

        mutex_enter(&uobj->vmobjlock);
        /* locked: maps(read), amap(if there), uobj */
        /*
         * the following call to pgo_get does _not_ change locking state
         */

        uvmexp.fltlget++;
        gotpages = flt->npages;
        (void) uobj->pgops->pgo_get(uobj,
            ufi->entry->offset + flt->startva - ufi->entry->start,
            pages, &gotpages, flt->centeridx,
            flt->access_type & MASK(ufi->entry), ufi->entry->advice,
            PGO_LOCKED);

        /*
         * check for pages to map, if we got any
         */

        if (gotpages == 0) {
                pages[flt->centeridx] = NULL;
                return;
        }

        currva = flt->startva;
        for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
                struct vm_page *curpg;

                curpg = pages[lcv];
                if (curpg == NULL || curpg == PGO_DONTCARE) {
                        continue;
                }
                KASSERT(curpg->uobject == uobj);

                /*
                 * if center page is resident and not PG_BUSY|PG_RELEASED
                 * then pgo_get made it PG_BUSY for us and gave us a handle
                 * to it.
                 */

                if (lcv == flt->centeridx) {
                        UVMHIST_LOG(maphist, "  got uobjpage "
                            "(0x%x) with locked get",
                            curpg, 0,0,0);
                } else {
                        bool readonly = (curpg->flags & PG_RDONLY)
                            || (curpg->loan_count > 0)
                            || UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);

                        uvm_fault_lower_neighbor(ufi, flt,
                            currva, curpg, readonly);
                }
        }
        pmap_update(ufi->orig_map->pmap);
}
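
/*
 * illustrative PGO_LOCKED contract (restating the pager rules relied on
 * above, not part of the original source): with PGO_LOCKED, pgo_get()
 *
 *	- does not sleep or change the locking state;
 *	- fills pages[] only with pages already resident in memory,
 *	  returning the count in *gotpages;
 *	- hands back the center page PG_BUSY (our handle for the fault),
 *	  while neighbor pages come back neither busy nor released.
 *
 * anything not resident is fetched later, if needed, by the PGO_SYNCIO
 * call in uvm_fault_lower_io().
 */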

/*
 * uvm_fault_lower_neighbor: enter h/w mapping of lower neighbor page.
 */

static void
uvm_fault_lower_neighbor(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        vaddr_t currva, struct vm_page *pg, bool readonly)
{
        UVMHIST_FUNC("uvm_fault_lower_neighbor"); UVMHIST_CALLED(maphist);

        /* locked: maps(read), amap(if there), uobj */

        /*
         * calling pgo_get with PGO_LOCKED returns us pages which
         * are neither busy nor released, so we don't need to check
         * for this.  we can just directly enter the pages.
         */

        mutex_enter(&uvm_pageqlock);
        uvm_pageenqueue(pg);
        mutex_exit(&uvm_pageqlock);
        UVMHIST_LOG(maphist,
            "  MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
            ufi->orig_map->pmap, currva, pg, 0);
        uvmexp.fltnomap++;

        /*
         * Since this page isn't the page that's actually faulting,
         * ignore pmap_enter() failures; it's not critical that we
         * enter these right now.
         * NOTE: page can't be PG_WANTED or PG_RELEASED because we've
         * held the lock the whole time we've had the handle.
         */
        KASSERT((pg->flags & PG_PAGEOUT) == 0);
        KASSERT((pg->flags & PG_RELEASED) == 0);
        KASSERT((pg->flags & PG_WANTED) == 0);
        KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) ||
            (pg->flags & PG_CLEAN) != 0);
        pg->flags &= ~(PG_BUSY);
        UVM_PAGE_OWN(pg, NULL);

        (void) pmap_enter(ufi->orig_map->pmap, currva,
            VM_PAGE_TO_PHYS(pg),
            readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
            flt->enter_prot & MASK(ufi->entry),
            PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
}

/*
 * uvm_fault_lower_io: get lower page from backing store.
 *
 *	1. unlock everything, because i/o will block.
 *	2. call pgo_get.
 *	3. if failed, recover.
 *	4. if succeeded, relock everything and verify things.
 */

static int
uvm_fault_lower_io(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object **ruobj, struct vm_page **ruobjpage)
{
        struct vm_amap * const amap = ufi->entry->aref.ar_amap;
        struct uvm_object *uobj = *ruobj;
        struct vm_page *pg;
        bool locked;
        int gotpages;
        int error;
        voff_t uoff;
        UVMHIST_FUNC("uvm_fault_lower_io"); UVMHIST_CALLED(maphist);

        /* update rusage counters */
        curlwp->l_ru.ru_majflt++;

        /* locked: maps(read), amap(if there), uobj */
        uvmfault_unlockall(ufi, amap, NULL, NULL);
        /* locked: uobj */

        uvmexp.fltget++;
        gotpages = 1;
        pg = NULL;
        uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
        error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
            0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
            PGO_SYNCIO);
        /* locked: pg(if no error) */

        /*
         * recover from I/O
         */

        if (error) {
                if (error == EAGAIN) {
                        UVMHIST_LOG(maphist,
                            "  pgo_get says TRY AGAIN!",0,0,0,0);
                        kpause("fltagain2", false, hz/2, NULL);
                        return ERESTART;
                }

#if 0
                KASSERT(error != ERESTART);
#else
                /* XXXUEBS don't re-fault? */
                if (error == ERESTART)
                        error = EIO;
#endif

                UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
                    error, 0,0,0);
                return error;
        }

        /* locked: pg */

        KASSERT((pg->flags & PG_BUSY) != 0);

        mutex_enter(&uvm_pageqlock);
        uvm_pageactivate(pg);
        mutex_exit(&uvm_pageqlock);

        /*
         * re-verify the state of the world by first trying to relock
         * the maps.  always relock the object.
         */

        locked = uvmfault_relock(ufi);
        if (locked && amap)
                amap_lock(amap);

        /* might be changed */
        uobj = pg->uobject;

        mutex_enter(&uobj->vmobjlock);

        /* locked(locked): maps(read), amap(if !null), uobj, pg */
        /* locked(!locked): uobj, pg */

        /*
         * verify that the page has not been released and re-verify
         * that amap slot is still free.   if there is a problem,
         * we unlock and clean up.
         */

        if ((pg->flags & PG_RELEASED) != 0 ||
            (locked && amap && amap_lookup(&ufi->entry->aref,
              ufi->orig_rvaddr - ufi->entry->start))) {
                if (locked)
                        uvmfault_unlockall(ufi, amap, NULL, NULL);
                locked = false;
        }

        /*
         * didn't get the lock?   release the page and retry.
         */

        if (locked == false) {
                UVMHIST_LOG(maphist,
                    "  wasn't able to relock after fault: retry",
                    0,0,0,0);
                if (pg->flags & PG_WANTED) {
                        wakeup(pg);
                }
                if (pg->flags & PG_RELEASED) {
                        uvmexp.fltpgrele++;
                        uvm_pagefree(pg);
                        mutex_exit(&uobj->vmobjlock);
                        return ERESTART;
                }
                pg->flags &= ~(PG_BUSY|PG_WANTED);
                UVM_PAGE_OWN(pg, NULL);
                mutex_exit(&uobj->vmobjlock);
                return ERESTART;
        }

        /*
         * we have the data in pg which is busy and
         * not released.  we are holding object lock (so the page
         * can't be released on us).
         */

        /* locked: maps(read), amap(if !null), uobj, pg */

        *ruobj = uobj;
        *ruobjpage = pg;
        return 0;
}

/*
 * uvm_fault_lower_direct: fault lower center page
 *
 *	1. adjust h/w mapping protection.
 *	2. if page is loaned, resolve.
 */

int
uvm_fault_lower_direct(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_page *uobjpage)
{
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_fault_lower_direct"); UVMHIST_CALLED(maphist);

        /*
         * we are not promoting.   if the mapping is COW ensure that we
         * don't give more access than we should (e.g. when doing a read
         * fault on a COPYONWRITE mapping we want to map the COW page in
         * R/O even though the entry protection could be R/W).
         *
         * set "pg" to the page we want to map in (uobjpage, usually)
         */

        uvmexp.flt_obj++;
        if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
            UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
                flt->enter_prot &= ~VM_PROT_WRITE;
        pg = uobjpage;          /* map in the actual object */

        KASSERT(uobjpage != PGO_DONTCARE);

        /*
         * we are faulting directly on the page.   be careful
         * about writing to loaned pages...
         */

        if (uobjpage->loan_count) {
                uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
        }
        KASSERT(pg == uobjpage);

        return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg, uobjpage);
}

/*
 * uvm_fault_lower_direct_loan: resolve loaned page.
 *
 *	1. if not cow'ing, adjust h/w mapping protection.
 *	2. if cow'ing, break loan.
 */

static int
uvm_fault_lower_direct_loan(
        struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct uvm_object *uobj, struct vm_page **rpg,
        struct vm_page **ruobjpage)
{
        struct vm_amap * const amap = ufi->entry->aref.ar_amap;
        struct vm_page *pg;
        struct vm_page *uobjpage = *ruobjpage;
        UVMHIST_FUNC("uvm_fault_lower_direct_loan"); UVMHIST_CALLED(maphist);

        if (!flt->cow_now) {
                /* read fault: cap the protection at read-only */
                flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
        } else {
                /* write fault: must break the loan here */

                pg = uvm_loanbreak(uobjpage);
                if (pg == NULL) {

                        /*
                         * drop ownership of page, it can't be released
                         */

                        if (uobjpage->flags & PG_WANTED)
                                wakeup(uobjpage);
                        uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
                        UVM_PAGE_OWN(uobjpage, NULL);

                        uvmfault_unlockall(ufi, amap, uobj, NULL);
                        UVMHIST_LOG(maphist,
                            "  out of RAM breaking loan, waiting",
                            0,0,0,0);
                        uvmexp.fltnoram++;
                        uvm_wait("flt_noram4");
                        return ERESTART;
                }
                *rpg = pg;
                *ruobjpage = pg;
        }
        return 0;
}
/*
 * uvm_fault_lower_promote: promote lower page.
 *
 * 1. call uvmfault_promote.
 * 2. fill in data.
 * 3. if not ZFOD, dispose of the old page.
 */

int
uvm_fault_lower_promote(
	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
	struct uvm_object *uobj, struct vm_page *uobjpage)
{
	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
	struct vm_anon *anon;
	struct vm_page *pg;
	int error;
	UVMHIST_FUNC("uvm_fault_lower_promote"); UVMHIST_CALLED(maphist);

	/*
	 * if we are going to promote the data to an anon we
	 * allocate a blank anon here and plug it into our amap.
	 */
#if DIAGNOSTIC
	if (amap == NULL)
		panic("uvm_fault: want to promote data, but no anon");
#endif
	error = uvmfault_promote(ufi, NULL, uobjpage,
	    &anon, &flt->anon_spare);
	switch (error) {
	case 0:
		break;
	case ERESTART:
		return ERESTART;
	default:
		return error;
	}

	pg = anon->an_page;

	/*
	 * fill in the data
	 */

	if (uobjpage != PGO_DONTCARE) {
		uvmexp.flt_prcopy++;

		/*
		 * promote to shared amap?  make sure all sharing
		 * procs see it
		 */

		if ((amap_flags(amap) & AMAP_SHARED) != 0) {
			pmap_page_protect(uobjpage, VM_PROT_NONE);
			/*
			 * XXX: PAGE MIGHT BE WIRED!
			 */
		}

		/*
		 * dispose of uobjpage.  it can't be PG_RELEASED
		 * since we still hold the object lock.
		 * drop handle to uobj as well.
		 */

		if (uobjpage->flags & PG_WANTED)
			/* still have the obj lock */
			wakeup(uobjpage);
		uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(uobjpage, NULL);
		mutex_exit(&uobj->vmobjlock);
		uobj = NULL;

		UVMHIST_LOG(maphist,
		    "  promote uobjpage 0x%x to anon/page 0x%x/0x%x",
		    uobjpage, anon, pg, 0);

	} else {
		uvmexp.flt_przero++;

		/*
		 * page is zero'd and marked dirty by
		 * uvmfault_promote().
		 */

		UVMHIST_LOG(maphist, "  zero fill anon/page 0x%x/0x%x",
		    anon, pg, 0, 0);
	}

	return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg, uobjpage);
}
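/*
 * what the "fill in the data" step above reduces to inside
 * uvmfault_promote() (sketch only, never compiled; see
 * uvmfault_promote() for the real anon allocation and locking):
 */
#if 0
	if (uobjpage != PGO_DONTCARE)
		uvm_pagecopy(uobjpage, pg);	/* case 2B: copy-on-write */
	else
		uvm_pagezero(pg);		/* case 2B: zero fill */
#endif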
/*
 * uvm_fault_lower_enter: enter h/w mapping of lower page.
 */

int
uvm_fault_lower_enter(
	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
	struct uvm_object *uobj,
	struct vm_anon *anon, struct vm_page *pg, struct vm_page *uobjpage)
{
	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
	int error;
	UVMHIST_FUNC("uvm_fault_lower_enter"); UVMHIST_CALLED(maphist);

	/*
	 * locked:
	 * maps(read), amap(if !null), uobj(if !null),
	 * uobjpage(if uobj), anon(if !null), pg(if anon)
	 *
	 * note: pg is either the uobjpage or the new page in the new anon
	 */
	KASSERT(amap == NULL || mutex_owned(&amap->am_l));
	KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
	KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
	KASSERT(anon == NULL || mutex_owned(&anon->an_lock));
	KASSERT((pg->flags & PG_BUSY) != 0);

	/*
	 * all resources are present.  we can now map it in and free our
	 * resources.
	 */

	UVMHIST_LOG(maphist,
	    "  MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
	    ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
	KASSERT((flt->access_type & VM_PROT_WRITE) == 0 ||
	    (pg->flags & PG_RDONLY) == 0);
	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
	    VM_PAGE_TO_PHYS(pg),
	    pg->flags & PG_RDONLY ?
	    flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
	    flt->access_type | PMAP_CANFAIL |
	    (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {

		/*
		 * no need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * we do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */

		if (pg->flags & PG_WANTED)
			wakeup(pg);

		/*
		 * note that pg can't be PG_RELEASED here, since we have
		 * held the object lock continuously since we last
		 * checked.
		 */
		KASSERT((pg->flags & PG_RELEASED) == 0);

		pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);

		uvmfault_unlockall(ufi, amap, uobj, anon);
		if (!uvm_reclaimable()) {
			UVMHIST_LOG(maphist,
			    "<- failed.  out of VM",0,0,0,0);
			/* XXX instrumentation */
			error = ENOMEM;
			return error;
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail2");
		return ERESTART;
	}

	uvm_fault_lower_done(ufi, flt, uobj, anon, pg);

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, amap, uobj, anon);

	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
	return 0;
}

/*
 * uvm_fault_lower_done: queue lower center page.
 */

void
uvm_fault_lower_done(
	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
	struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg)
{
	bool dropswap = false;

	UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);

	mutex_enter(&uvm_pageqlock);
	if (flt->wire_paging) {
		uvm_pagewire(pg);
		if (pg->pqflags & PQ_AOBJ) {

			/*
			 * since the now-wired page cannot be paged out,
			 * release its swap resources for others to use.
			 * since an aobj page with no swap cannot be
			 * PG_CLEAN, clear its clean flag now.
			 */

			KASSERT(uobj != NULL);
			pg->flags &= ~(PG_CLEAN);
			dropswap = true;
		}
	} else {
		uvm_pageactivate(pg);
	}
	mutex_exit(&uvm_pageqlock);

	if (dropswap) {
		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
	}
	if (pg->flags & PG_WANTED)
		wakeup(pg);

	/*
	 * note that pg can't be PG_RELEASED here, since we have held the
	 * object lock continuously since we last checked.
	 */
	KASSERT((pg->flags & PG_RELEASED) == 0);

	pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
}
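/*
 * the PMAP_CANFAIL handling in uvm_fault_lower_enter() above is a
 * common idiom: pmap_enter() may run out of resources, and the fault
 * handler, unlike most pmap_enter() callers, can recover.  a minimal
 * sketch (hypothetical, never compiled):
 */
#if 0
	if (pmap_enter(pmap, va, pa, prot, flags | PMAP_CANFAIL) != 0) {
		/* unlock everything first, since uvm_wait() sleeps */
		if (!uvm_reclaimable())
			return ENOMEM;	/* no swap, nothing to page out */
		uvm_wait("pmfail");	/* wait for the pagedaemon ... */
		return ERESTART;	/* ... then redo the whole fault */
	}
#endif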
/*
 * uvm_fault_wire: wire down a range of virtual addresses in a map.
 *
 * => map may be read-locked by caller, but MUST NOT be write-locked.
 * => if map is read-locked, any operations which may cause map to
 *	be write-locked in uvm_fault() must be taken care of by
 *	the caller.  see uvm_map_pageable().
 */

int
uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
    vm_prot_t access_type, int maxprot)
{
	vaddr_t va;
	int error;

	/*
	 * now fault it in a page at a time.  if the fault fails then we
	 * have to undo what we have done.  note that for a wire fault
	 * (UVM_FAULT_WIRE) the fault code replaces VM_PROT_NONE with the
	 * maximum protection.
	 */

	/*
	 * XXX work around overflowing a vaddr_t.  this prevents us from
	 * wiring the last page in the address space, though.
	 */
	if (start > end) {
		return EFAULT;
	}

	for (va = start; va < end; va += PAGE_SIZE) {
		error = uvm_fault_internal(map, va, access_type,
		    (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
		if (error) {
			if (va != start) {
				uvm_fault_unwire(map, start, va);
			}
			return error;
		}
	}
	return 0;
}

/*
 * uvm_fault_unwire(): unwire range of virtual space.
 */

void
uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}

/*
 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
 *
 * => map must be at least read-locked.
 */

void
uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *entry;
	pmap_t pmap = vm_map_pmap(map);
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * we assume that the area we are unwiring has actually been wired
	 * in the first place.  this means that we should be able to
	 * extract the PAs from the pmap.  we also lock out the page
	 * daemon so that we can call uvm_pageunwire.
	 */

	mutex_enter(&uvm_pageqlock);

	/*
	 * find the beginning map entry for the region.
	 */

	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
	if (uvm_map_lookup_entry(map, start, &entry) == false)
		panic("uvm_fault_unwire_locked: address not in map");

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap, va, &pa) == false)
			continue;

		/*
		 * find the map entry for the current address.
		 */

		KASSERT(va >= entry->start);
		while (va >= entry->end) {
			KASSERT(entry->next != &map->header &&
			    entry->next->start <= entry->end);
			entry = entry->next;
		}

		/*
		 * if the entry is no longer wired, tell the pmap.
		 */

		if (VM_MAPENT_ISWIRED(entry) == 0)
			pmap_unwire(pmap, va);

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg)
			uvm_pageunwire(pg);
	}

	mutex_exit(&uvm_pageqlock);
}
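/*
 * a hypothetical caller of the two routines above (sketch only, never
 * compiled): wire a user buffer for the duration of a device transfer,
 * then unwire it.  uvm_fault_wire() unwires any partially-wired range
 * itself on failure, so the caller only pairs a successful wire with a
 * single unwire.
 */
#if 0
	error = uvm_fault_wire(map, start, end,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (error)
		return error;
	/* ... transfer to/from the wired range ... */
	uvm_fault_unwire(map, start, end);
#endif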