/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	7.12 (Berkeley) 06/02/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
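 *
 *	Parameters, as used by the code below:
 *		map		map in which the fault occurred
 *		vaddr		page-aligned virtual address that faulted
 *		fault_type	access being attempted (VM_PROT_* bits)
 *		change_wiring	TRUE when the fault is taken only to adjust
 *				the page's wiring (see vm_fault_wire()),
 *				FALSE for a normal fault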
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_vm_faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
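	 *
	 *	The panic() checks later in this routine verify that the
	 *	page about to be used is busy and off the pageout queues,
	 *	as required by (1) and (2).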
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->busy) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->absent)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->inactive) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->inactive = FALSE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->active) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->active = FALSE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->busy = TRUE;
			m->absent = FALSE;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if ((object->pager != NULL) &&
				(!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = vm_pager_get(object->pager, m, TRUE);
			if (rv == VM_PAGER_OK) {
				/*
				 *	Found the page.
				 *	Leave it busy while we play with it.
				 */
				vm_object_lock(object);

				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pageins++;
				m->fake = FALSE;
				m->clean = TRUE;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	Remove the bogus page (which does not
			 *	exist at this object/offset); before
			 *	doing so, we must get back our object
			 *	lock to preserve our invariant.
			 *
			 *	Also wake up any other thread that may want
			 *	to bring in this page.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */

			vm_object_lock(object);
			/*
			 *	Data outside the range of the pager; an error
			 */
			if (rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 *	We get here if the object has no pager (or unwiring)
		 *	or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->fake = FALSE;
			m->absent = FALSE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if (m->absent || m->active || m->inactive || !m->busy)
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
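			 *	(first_m is the blank page allocated in the
			 *	top object during the first pass of the
			 *	search loop above.)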
			 */

			vm_page_copy(m, first_m);
			first_m->fake = FALSE;
			first_m->absent = FALSE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->copy_on_write = TRUE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
    RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t	copy_object = first_object->copy;
		vm_offset_t	copy_offset;
		vm_page_t	copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->copy_on_write = TRUE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->busy) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
						copy_object->pager,
						(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 *	Since the map is unlocked, someone
					 *	else could have copied this object
					 *	and put a different copy_object
					 *	between the two.  Or, the last
					 *	reference to the copy-object (other
					 *	than the one we have) may have
					 *	disappeared - if that has happened,
					 *	we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->fake = FALSE;
				copy_m->absent = FALSE;

				/*
				 *	Things to remember:
				 *	1. The copied page must be marked 'dirty'
				 *	   so it will be paged out to the copy
				 *	   object.
				 *	2. If the old page was in use by any users
				 *	   of the copy-object, it must be removed
				 *	   from all pmaps.  (We can't know which
				 *	   pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->clean = FALSE;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2:  one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->copy_on_write = FALSE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->copy_on_write)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 *	(the various bits we're fiddling with here are locked by
	 *	the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->copy_on_write = FALSE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.  We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);
}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{
	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
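 *	The pages in the range must currently be wired down
 *	(see vm_fault_wire() above).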
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{
	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{
	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_entry: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}
}
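
/*
 *	Illustrative sketch: how a machine-dependent trap handler might
 *	hand a user-mode page fault to vm_fault().  The helper name, its
 *	arguments, and the assumption that the faulting process's map is
 *	reached through p->p_vmspace->vm_map are hypothetical and
 *	machine-dependent (a real port uses its own trap-frame fields and
 *	includes <sys/proc.h>).  Compiled out; not referenced by the code
 *	above.
 */
#ifdef notdef
int
example_user_fault(p, fault_addr, is_write)
	struct proc	*p;
	vm_offset_t	fault_addr;
	boolean_t	is_write;
{
	vm_map_t	map = &p->p_vmspace->vm_map;
	vm_prot_t	ftype;

	ftype = is_write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;

	/*
	 *	vm_fault() expects a page-aligned address and, for an
	 *	ordinary fault (no wiring change), change_wiring == FALSE.
	 */
	return (vm_fault(map, trunc_page(fault_addr), ftype, FALSE));
}
#endif /* notdef */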