/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *      @(#)vm_fault.c  7.5 (Berkeley) 04/20/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
 */

#include "param.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address,
 * requiring the given permissions, in the map specified.
 * If successful, the page is inserted into the
 * associated physical map.
 *
 * NOTE: the given address should be truncated to the
 * proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 *
 * The map in question must be referenced, and remains so.
 * Caller may hold no locks.
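 *
 * In outline, the fault is resolved by walking the shadow chain from
 * the object backing the faulting map entry: a resident page is used
 * directly, the object's pager is asked for the data if it has it, and
 * otherwise the page is zero-filled in the top-level object.  Copying
 * into the top-level object (and into the copy-object, if one exists)
 * is done when a write requires it, and the resulting page is finally
 * entered into the physical map.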
 */
vm_fault(map, vaddr, fault_type, change_wiring)
        vm_map_t        map;
        vm_offset_t     vaddr;
        vm_prot_t       fault_type;
        boolean_t       change_wiring;
{
        vm_object_t             first_object;
        vm_offset_t             first_offset;
        vm_map_entry_t          entry;
        register vm_object_t    object;
        register vm_offset_t    offset;
        register vm_page_t      m;
        vm_page_t               first_m;
        vm_prot_t               prot;
        int                     result;
        boolean_t               wired;
        boolean_t               su;
        boolean_t               lookup_still_valid;
        boolean_t               page_exists;
        vm_page_t               old_m;
        vm_object_t             next_object;

        vm_stat.faults++;               /* needs lock XXX */
        /*
         * Recovery actions
         */
#define FREE_PAGE(m) {                                          \
        PAGE_WAKEUP(m);                                         \
        vm_page_lock_queues();                                  \
        vm_page_free(m);                                        \
        vm_page_unlock_queues();                                \
}

#define RELEASE_PAGE(m) {                                       \
        PAGE_WAKEUP(m);                                         \
        vm_page_lock_queues();                                  \
        vm_page_activate(m);                                    \
        vm_page_unlock_queues();                                \
}

#define UNLOCK_MAP {                                            \
        if (lookup_still_valid) {                               \
                vm_map_lookup_done(map, entry);                 \
                lookup_still_valid = FALSE;                     \
        }                                                       \
}

#define UNLOCK_THINGS {                                         \
        object->paging_in_progress--;                           \
        vm_object_unlock(object);                               \
        if (object != first_object) {                           \
                vm_object_lock(first_object);                   \
                FREE_PAGE(first_m);                             \
                first_object->paging_in_progress--;             \
                vm_object_unlock(first_object);                 \
        }                                                       \
        UNLOCK_MAP;                                             \
}

#define UNLOCK_AND_DEALLOCATE {                                 \
        UNLOCK_THINGS;                                          \
        vm_object_deallocate(first_object);                     \
}

    RetryFault: ;

        /*
         * Find the backing store object and offset into
         * it to begin the search.
         */

        if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
                        &first_object, &first_offset,
                        &prot, &wired, &su)) != KERN_SUCCESS) {
                return(result);
        }
        lookup_still_valid = TRUE;

        if (wired)
                fault_type = prot;

        first_m = NULL;

        /*
         * Make a reference to this object to
         * prevent its disposal while we are messing with
         * it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their
         * shadows (and copies), they will stay around as well.
         */

        vm_object_lock(first_object);

        first_object->ref_count++;
        first_object->paging_in_progress++;

        /*
         * INVARIANTS (through entire routine):
         *
         * 1)   At all times, we must either have the object
         *      lock or a busy page in some object to prevent
         *      some other thread from trying to bring in
         *      the same page.
         *
         *      Note that we cannot hold any locks during the
         *      pager access or when waiting for memory, so
         *      we use a busy page then.
         *
         *      Note also that we aren't as concerned about
         *      more than one thread attempting to pager_data_unlock
         *      the same page at once, so we don't hold the page
         *      as busy then, but do record the highest unlock
         *      value so far.  [Unlock requests may also be delivered
         *      out of order.]
         *
         * 2)   Once we have a busy page, we must remove it from
         *      the pageout queues, so that the pageout daemon
         *      will not grab it away.
         *
         * 3)   To prevent another thread from racing us down the
         *      shadow chain and entering a new page in the top
         *      object before we do, we must keep a busy page in
         *      the top object while following the shadow chain.
         *
         * 4)   We must increment paging_in_progress on any object
         *      for which we have a busy page, to prevent
         *      vm_object_collapse from removing the busy page
         *      without our noticing.
         */

        /*
         * Search for the page at object/offset.
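         *
         * The loop below walks the shadow chain: if the page is not
         * resident in the current object and the object's pager (if
         * any) cannot supply it, we move on to the object's shadow.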
         */

        object = first_object;
        offset = first_offset;

        /*
         * See whether this page is resident
         */

        while (TRUE) {
                m = vm_page_lookup(object, offset);
                if (m != NULL) {
                        /*
                         * If the page is being brought in,
                         * wait for it and then retry.
                         */
                        if (m->busy) {
#ifdef DOTHREADS
                                int     wait_result;

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                wait_result = current_thread()->wait_result;
                                vm_object_deallocate(first_object);
                                if (wait_result != THREAD_AWAKENED)
                                        return(KERN_SUCCESS);
                                goto RetryFault;
#else
                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                vm_object_deallocate(first_object);
                                goto RetryFault;
#endif
                        }

                        if (m->absent)
                                panic("vm_fault: absent");

                        /*
                         * If the desired access to this page has
                         * been locked out, request that it be unlocked.
                         */

                        if (fault_type & m->page_lock) {
#ifdef DOTHREADS
                                int     wait_result;

                                if ((fault_type & m->unlock_request) != fault_type)
                                        panic("vm_fault: pager_data_unlock");

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                wait_result = current_thread()->wait_result;
                                vm_object_deallocate(first_object);
                                if (wait_result != THREAD_AWAKENED)
                                        return(KERN_SUCCESS);
                                goto RetryFault;
#else
                                if ((fault_type & m->unlock_request) != fault_type)
                                        panic("vm_fault: pager_data_unlock");

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                vm_object_deallocate(first_object);
                                goto RetryFault;
#endif
                        }

                        /*
                         * Remove the page from the pageout daemon's
                         * reach while we play with it.
                         */

                        vm_page_lock_queues();
                        if (m->inactive) {
                                queue_remove(&vm_page_queue_inactive, m,
                                                vm_page_t, pageq);
                                m->inactive = FALSE;
                                vm_page_inactive_count--;
                                vm_stat.reactivations++;
                        }

                        if (m->active) {
                                queue_remove(&vm_page_queue_active, m,
                                                vm_page_t, pageq);
                                m->active = FALSE;
                                vm_page_active_count--;
                        }
                        vm_page_unlock_queues();

                        /*
                         * Mark page busy for other threads.
                         */
                        m->busy = TRUE;
                        m->absent = FALSE;
                        break;
                }

                if (((object->pager != NULL) &&
                                (!change_wiring || wired))
                    || (object == first_object)) {

                        /*
                         * Allocate a new page for this object/offset
                         * pair.
                         */

                        m = vm_page_alloc(object, offset);

                        if (m == NULL) {
                                UNLOCK_AND_DEALLOCATE;
                                VM_WAIT;
                                goto RetryFault;
                        }
                }

                if ((object->pager != NULL) &&
                                (!change_wiring || wired)) {
                        int rv;

                        /*
                         * Now that we have a busy page, we can
                         * release the object lock.
                         */
                        vm_object_unlock(object);

                        /*
                         * Call the pager to retrieve the data, if any,
                         * after releasing the lock on the map.
                         */
                        UNLOCK_MAP;

                        rv = vm_pager_get(object->pager, m, TRUE);
                        if (rv == VM_PAGER_OK) {
                                /*
                                 * Found the page.
                                 * Leave it busy while we play with it.
                                 */
                                vm_object_lock(object);

                                /*
                                 * Relookup in case pager changed page.
                                 * Pager is responsible for disposition
                                 * of old page if moved.
                                 */
                                m = vm_page_lookup(object, offset);

                                vm_stat.pageins++;
                                m->fake = FALSE;
                                pmap_clear_modify(VM_PAGE_TO_PHYS(m));
                                break;
                        }

                        /*
                         * Remove the bogus page (which does not
                         * exist at this object/offset); before
                         * doing so, we must get back our object
                         * lock to preserve our invariant.
                         *
                         * Also wake up any other thread that may want
                         * to bring in this page.
                         *
                         * If this is the top-level object, we must
                         * leave the busy page to prevent another
                         * thread from rushing past us, and inserting
                         * the page in that object at the same time
                         * that we are.
                         */

                        vm_object_lock(object);
                        /*
                         * Data outside the range of the pager; an error
                         */
                        if (rv == VM_PAGER_BAD) {
                                FREE_PAGE(m);
                                UNLOCK_AND_DEALLOCATE;
                                return(KERN_PROTECTION_FAILURE); /* XXX */
                        }
                        if (object != first_object) {
                                FREE_PAGE(m);
                                /*
                                 * XXX - we cannot just fall out at this
                                 * point, m has been freed and is invalid!
                                 */
                        }
                }

                /*
                 * We get here if the object has no pager (or unwiring)
                 * or the pager doesn't have the page.
                 */
                if (object == first_object)
                        first_m = m;

                /*
                 * Move on to the next object.  Lock the next
                 * object before unlocking the current one.
                 */

                offset += object->shadow_offset;
                next_object = object->shadow;
                if (next_object == NULL) {
                        /*
                         * If there's no object left, fill the page
                         * in the top object with zeros.
                         */
                        if (object != first_object) {
                                object->paging_in_progress--;
                                vm_object_unlock(object);

                                object = first_object;
                                offset = first_offset;
                                m = first_m;
                                vm_object_lock(object);
                        }
                        first_m = NULL;

                        vm_page_zero_fill(m);
                        vm_stat.zero_fill_count++;
                        m->fake = FALSE;
                        m->absent = FALSE;
                        break;
                }
                else {
                        vm_object_lock(next_object);
                        if (object != first_object)
                                object->paging_in_progress--;
                        vm_object_unlock(object);
                        object = next_object;
                        object->paging_in_progress++;
                }
        }

        if (m->absent || m->active || m->inactive || !m->busy)
                panic("vm_fault: absent or active or inactive or not busy after main loop");

        /*
         * PAGE HAS BEEN FOUND.
         * [Loop invariant still holds -- the object lock
         * is held.]
         */

        old_m = m;      /* save page that would be copied */

        /*
         * If the page is being written, but isn't
         * already owned by the top-level object,
         * we have to copy it into a new page owned
         * by the top-level object.
         */

        if (object != first_object) {
                /*
                 * We only really need to copy if we
                 * want to write it.
                 */

                if (fault_type & VM_PROT_WRITE) {

                        /*
                         * If we try to collapse first_object at this
                         * point, we may deadlock when we try to get
                         * the lock on an intermediate object (since we
                         * have the bottom object locked).  We can't
                         * unlock the bottom object, because the page
                         * we found may move (by collapse) if we do.
                         *
                         * Instead, we first copy the page.  Then, when
                         * we have no more use for the bottom object,
                         * we unlock it and try to collapse.
                         *
                         * Note that we copy the page even if we didn't
                         * need to... that's the breaks.
                         */

                        /*
                         * We already have an empty page in
                         * first_object - use it.
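                         * (first_m is the blank page that was allocated
                         * in the top-level object on the first pass of
                         * the search loop above.)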
                         */

                        vm_page_copy(m, first_m);
                        first_m->fake = FALSE;
                        first_m->absent = FALSE;

                        /*
                         * If another map is truly sharing this
                         * page with us, we have to flush all
                         * uses of the original page, since we
                         * can't distinguish those which want the
                         * original from those which need the
                         * new copy.
                         */

                        vm_page_lock_queues();
                        if (!su) {
                                /*
                                 * Also, once it's no longer in
                                 * use by any maps, move it to
                                 * the inactive queue instead.
                                 */

                                vm_page_deactivate(m);
                                pmap_remove_all(VM_PAGE_TO_PHYS(m));
                        }
                        else {
                                /*
                                 * Old page is only (possibly)
                                 * in use by faulting map.  We
                                 * should do a pmap_remove on
                                 * that mapping, but we know
                                 * that pmap_enter will remove
                                 * the old mapping before
                                 * inserting the new one.
                                 */
                                vm_page_activate(m);
                        }
                        vm_page_unlock_queues();

                        /*
                         * We no longer need the old page or object.
                         */
                        PAGE_WAKEUP(m);
                        object->paging_in_progress--;
                        vm_object_unlock(object);

                        /*
                         * Only use the new page below...
                         */

                        vm_stat.cow_faults++;
                        m = first_m;
                        object = first_object;
                        offset = first_offset;

                        /*
                         * Now that we've gotten the copy out of the
                         * way, let's try to collapse the top object.
                         */
                        vm_object_lock(object);
                        /*
                         * But we have to play ugly games with
                         * paging_in_progress to do that...
                         */
                        object->paging_in_progress--;
                        vm_object_collapse(object);
                        object->paging_in_progress++;
                }
                else {
                        prot &= (~VM_PROT_WRITE);
                        m->copy_on_write = TRUE;
                }
        }

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before copy object handling");

        /*
         * If the page is being written, but hasn't been
         * copied to the copy-object, we have to copy it there.
         */
    RetryCopy:
        if (first_object->copy != NULL) {
                vm_object_t     copy_object = first_object->copy;
                vm_offset_t     copy_offset;
                vm_page_t       copy_m;

                /*
                 * We only need to copy if we want to write it.
                 */
                if ((fault_type & VM_PROT_WRITE) == 0) {
                        prot &= ~VM_PROT_WRITE;
                        m->copy_on_write = TRUE;
                }
                else {
                        /*
                         * Try to get the lock on the copy_object.
                         */
                        if (!vm_object_lock_try(copy_object)) {
                                vm_object_unlock(object);
                                /* should spin a bit here... */
                                vm_object_lock(object);
                                goto RetryCopy;
                        }

                        /*
                         * Make another reference to the copy-object,
                         * to keep it from disappearing during the
                         * copy.
                         */
                        copy_object->ref_count++;

                        /*
                         * Does the page exist in the copy?
                         */
                        copy_offset = first_offset
                                - copy_object->shadow_offset;
                        copy_m = vm_page_lookup(copy_object, copy_offset);
                        if (page_exists = (copy_m != NULL)) {
                                if (copy_m->busy) {
#ifdef DOTHREADS
                                        int     wait_result;

                                        /*
                                         * If the page is being brought
                                         * in, wait for it and then retry.
                                         */
                                        PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_THINGS;
                                        thread_block();
                                        wait_result = current_thread()->wait_result;
                                        vm_object_deallocate(first_object);
                                        if (wait_result != THREAD_AWAKENED)
                                                return(KERN_SUCCESS);
                                        goto RetryFault;
#else
                                        /*
                                         * If the page is being brought
                                         * in, wait for it and then retry.
                                         */
                                        PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_THINGS;
                                        thread_block();
                                        vm_object_deallocate(first_object);
                                        goto RetryFault;
#endif
                                }
                        }

                        /*
                         * If the page is not in memory (in the object)
                         * and the object has a pager, we have to check
                         * if the pager has the data in secondary
                         * storage.
                         */
                        if (!page_exists) {

                                /*
                                 * If we don't allocate a (blank) page
                                 * here... another thread could try
                                 * to page it in, allocate a page, and
                                 * then block on the busy page in its
                                 * shadow (first_object).  Then we'd
                                 * trip over the busy page after we
                                 * found that the copy_object's pager
                                 * doesn't have the page...
                                 */
                                copy_m = vm_page_alloc(copy_object,
                                                copy_offset);
                                if (copy_m == NULL) {
                                        /*
                                         * Wait for a page, then retry.
                                         */
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_AND_DEALLOCATE;
                                        VM_WAIT;
                                        goto RetryFault;
                                }

                                if (copy_object->pager != NULL) {
                                        vm_object_unlock(object);
                                        vm_object_unlock(copy_object);
                                        UNLOCK_MAP;

                                        page_exists = vm_pager_has_page(
                                                copy_object->pager,
                                                (copy_offset + copy_object->paging_offset));

                                        vm_object_lock(copy_object);

                                        /*
                                         * Since the map is unlocked, someone
                                         * else could have copied this object
                                         * and put a different copy_object
                                         * between the two.  Or, the last
                                         * reference to the copy-object (other
                                         * than the one we have) may have
                                         * disappeared - if that has happened,
                                         * we don't need to make the copy.
                                         */
                                        if (copy_object->shadow != object ||
                                            copy_object->ref_count == 1) {
                                                /*
                                                 * Gaah... start over!
                                                 */
                                                FREE_PAGE(copy_m);
                                                vm_object_unlock(copy_object);
                                                vm_object_deallocate(copy_object);
                                                        /* may block */
                                                vm_object_lock(object);
                                                goto RetryCopy;
                                        }
                                        vm_object_lock(object);

                                        if (page_exists) {
                                                /*
                                                 * We didn't need the page
                                                 */
                                                FREE_PAGE(copy_m);
                                        }
                                }
                        }
                        if (!page_exists) {
                                /*
                                 * Must copy page into copy-object.
                                 */
                                vm_page_copy(m, copy_m);
                                copy_m->fake = FALSE;
                                copy_m->absent = FALSE;

                                /*
                                 * Things to remember:
                                 * 1. The copied page must be marked 'dirty'
                                 *    so it will be paged out to the copy
                                 *    object.
                                 * 2. If the old page was in use by any users
                                 *    of the copy-object, it must be removed
                                 *    from all pmaps.  (We can't know which
                                 *    pmaps use it.)
                                 */
                                vm_page_lock_queues();
                                pmap_remove_all(VM_PAGE_TO_PHYS(old_m));
                                copy_m->clean = FALSE;
                                vm_page_activate(copy_m);       /* XXX */
                                vm_page_unlock_queues();

                                PAGE_WAKEUP(copy_m);
                        }
                        /*
                         * The reference count on copy_object must be
                         * at least 2: one for our extra reference,
                         * and at least one from the outside world
                         * (we checked that when we last locked
                         * copy_object).
                         */
                        copy_object->ref_count--;
                        vm_object_unlock(copy_object);
                        m->copy_on_write = FALSE;
                }
        }

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before retrying lookup");

        /*
         * We must verify that the maps have not changed
         * since our last lookup.
         */

        if (!lookup_still_valid) {
                vm_object_t     retry_object;
                vm_offset_t     retry_offset;
                vm_prot_t       retry_prot;

                /*
                 * Since map entries may be pageable, make sure we can
                 * take a page fault on them.
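                 * (The object is unlocked below so that such a fault
                 * can be taken without the object lock held.)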
                 */
                vm_object_unlock(object);

                /*
                 * To avoid trying to write_lock the map while another
                 * thread has it read_locked (in vm_map_pageable), we
                 * do not try for write permission.  If the page is
                 * still writable, we will get write permission.  If it
                 * is not, or has been marked needs_copy, we enter the
                 * mapping without write permission, and will merely
                 * take another fault.
                 */
                result = vm_map_lookup(&map, vaddr,
                                fault_type & ~VM_PROT_WRITE, &entry,
                                &retry_object, &retry_offset, &retry_prot,
                                &wired, &su);

                vm_object_lock(object);

                /*
                 * If we don't need the page any longer, put it on the
                 * active list (the easiest thing to do here).  If no
                 * one needs it, pageout will grab it eventually.
                 */

                if (result != KERN_SUCCESS) {
                        RELEASE_PAGE(m);
                        UNLOCK_AND_DEALLOCATE;
                        return(result);
                }

                lookup_still_valid = TRUE;

                if ((retry_object != first_object) ||
                                (retry_offset != first_offset)) {
                        RELEASE_PAGE(m);
                        UNLOCK_AND_DEALLOCATE;
                        goto RetryFault;
                }

                /*
                 * Check whether the protection has changed or the object
                 * has been copied while we left the map unlocked.
                 * Changing from read to write permission is OK - we leave
                 * the page write-protected, and catch the write fault.
                 * Changing from write to read permission means that we
                 * can't mark the page write-enabled after all.
                 */
                prot &= retry_prot;
                if (m->copy_on_write)
                        prot &= ~VM_PROT_WRITE;
        }

        /*
         * (the various bits we're fiddling with here are locked by
         * the object's lock)
         */

        /* XXX This distorts the meaning of the copy_on_write bit */

        if (prot & VM_PROT_WRITE)
                m->copy_on_write = FALSE;

        /*
         * It's critically important that a wired-down page be faulted
         * only once in each map for which it is wired.
         */

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before pmap_enter");

        vm_object_unlock(object);

        /*
         * Put this page into the physical map.
         * We had to do the unlock above because pmap_enter
         * may cause other faults.  We don't put the
         * page back on the active queue until later so
         * that the page-out daemon won't find us (yet).
         */

        pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
                        prot & ~(m->page_lock), wired);

        /*
         * If the page is not wired down, then put it where the
         * pageout daemon can find it.
         */
        vm_object_lock(object);
        vm_page_lock_queues();
        if (change_wiring) {
                if (wired)
                        vm_page_wire(m);
                else
                        vm_page_unwire(m);
        }
        else
                vm_page_activate(m);
        vm_page_unlock_queues();

        /*
         * Unlock everything, and return
         */

        PAGE_WAKEUP(m);
        UNLOCK_AND_DEALLOCATE;

        return(KERN_SUCCESS);

}

/*
 * vm_fault_wire:
 *
 * Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
        vm_map_t        map;
        vm_offset_t     start, end;
{

        register vm_offset_t    va;
        register pmap_t         pmap;

        pmap = vm_map_pmap(map);

        /*
         * Inform the physical mapping system that the
         * range of addresses may not fault, so that
         * page tables and such can be locked down as well.
         */

        pmap_pageable(pmap, start, end, FALSE);

        /*
         * We simulate a fault to get the page and enter it
         * in the physical map.
         */

        for (va = start; va < end; va += PAGE_SIZE) {
                (void) vm_fault(map, va, VM_PROT_NONE, TRUE);
        }
}


/*
 * vm_fault_unwire:
 *
 * Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
        vm_map_t        map;
        vm_offset_t     start, end;
{

        register vm_offset_t    va, pa;
        register pmap_t         pmap;

        pmap = vm_map_pmap(map);

        /*
         * Since the pages are wired down, we must be able to
         * get their mappings from the physical map system.
         */

        vm_page_lock_queues();

        for (va = start; va < end; va += PAGE_SIZE) {
                pa = pmap_extract(pmap, va);
                if (pa == (vm_offset_t) 0) {
                        panic("unwire: page not in pmap");
                }
                pmap_change_wiring(pmap, va, FALSE);
                vm_page_unwire(PHYS_TO_VM_PAGE(pa));
        }
        vm_page_unlock_queues();

        /*
         * Inform the physical mapping system that the range
         * of addresses may fault, so that page tables and
         * such may be unwired themselves.
         */

        pmap_pageable(pmap, start, end, TRUE);

}

/*
 * Routine:
 *      vm_fault_copy_entry
 * Function:
 *      Copy all of the pages from a wired-down map entry to another.
 *
 * In/out conditions:
 *      The source and destination maps must be locked for write.
 *      The source map entry must be wired down (or be a sharing map
 *      entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
        vm_map_t        dst_map;
        vm_map_t        src_map;
        vm_map_entry_t  dst_entry;
        vm_map_entry_t  src_entry;
{

        vm_object_t     dst_object;
        vm_object_t     src_object;
        vm_offset_t     dst_offset;
        vm_offset_t     src_offset;
        vm_prot_t       prot;
        vm_offset_t     vaddr;
        vm_page_t       dst_m;
        vm_page_t       src_m;

#ifdef lint
        src_map++;
#endif lint

        src_object = src_entry->object.vm_object;
        src_offset = src_entry->offset;

        /*
         * Create the top-level object for the destination entry.
         * (Doesn't actually shadow anything - we copy the pages
         * directly.)
         */
        dst_object = vm_object_allocate(
                        (vm_size_t) (dst_entry->end - dst_entry->start));

        dst_entry->object.vm_object = dst_object;
        dst_entry->offset = 0;

        prot = dst_entry->max_protection;

        /*
         * Loop through all of the pages in the entry's range, copying
         * each one from the source object (it should be there) to the
         * destination object.
         */
        for (vaddr = dst_entry->start, dst_offset = 0;
             vaddr < dst_entry->end;
             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

                /*
                 * Allocate a page in the destination object
                 */
                vm_object_lock(dst_object);
                do {
                        dst_m = vm_page_alloc(dst_object, dst_offset);
                        if (dst_m == NULL) {
                                vm_object_unlock(dst_object);
                                VM_WAIT;
                                vm_object_lock(dst_object);
                        }
                } while (dst_m == NULL);

                /*
                 * Find the page in the source object, and copy it in.
                 * (Because the source is wired down, the page will be
                 * in memory.)
                 */
                vm_object_lock(src_object);
                src_m = vm_page_lookup(src_object, dst_offset + src_offset);
                if (src_m == NULL)
                        panic("vm_fault_copy_wired: page missing");

                vm_page_copy(src_m, dst_m);

                /*
                 * Enter it in the pmap...
                 */
                vm_object_unlock(src_object);
                vm_object_unlock(dst_object);

                pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
                        prot, FALSE);

                /*
                 * Mark it no longer busy, and put it on the active list.
                 */
                vm_object_lock(dst_object);
                vm_page_lock_queues();
                vm_page_activate(dst_m);
                vm_page_unlock_queues();
                PAGE_WAKEUP(dst_m);
                vm_object_unlock(dst_object);
        }

}